path (string, length 7-265) | concatenated_notebook (string, length 46-17M)
---|---|
07-word2vec.ipynb | ###Markdown
# 07 - Building Embeddings

So far, we always performed the embedding lookup on a variable that is trained as we go. This means that we are learning the embeddings every time we train. A better option would be to pretrain our embeddings on a larger dataset and use the pretrained embeddings in our final model. We have two options:

1. use pretrained embeddings like GloVe (a sketch follows below)
2. train our own embeddings on our rap lyrics

We will first try to train our own embeddings, as rap might be a very specific type of text and general embeddings might not be applicable.
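Option 1 is not demonstrated in this notebook; a minimal sketch could look like the following (the GloVe file name, the 100-dimensional size, and the `build_embedding_matrix` helper are illustrative assumptions, not part of this notebook):

```python
import numpy as np

# Sketch: load pretrained GloVe vectors into a {word: vector} map.
# Assumes a locally downloaded file such as glove.6B.100d.txt (hypothetical path).
embedding_dim = 100  # must match the chosen GloVe file
glove = {}
with open("glove.6B.100d.txt", encoding="utf-8") as f:
    for line in f:
        parts = line.rstrip().split(" ")
        glove[parts[0]] = np.asarray(parts[1:], dtype=np.float32)

# Build an embedding matrix aligned with our own word indices; words GloVe
# does not cover (likely common in rap lyrics) fall back to random vectors.
def build_embedding_matrix(word2index, dim=embedding_dim):
    matrix = np.random.uniform(-1.0, 1.0, (len(word2index), dim))
    for word, idx in word2index.items():
        if word in glove:
            matrix[idx] = glove[word]
    return matrix.astype(np.float32)
```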
###Code
import os
import math
import glob
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
import tools.processing as pre
batch_size = 256
# embedding_dimension = 128
embedding_dimension = 10
negative_samples = 32
LOG_DIR = "logs/word2vec_intro"
EPOCHS = 20
tf.reset_default_graph()
text = pre.get_text("data/final_2_pac_rakim_kid_cudi.txt")
# Treat the whole lyrics file as one long sentence, with line breaks
# replaced by ';' separators.
sentences = [text.replace("\n", ";")]
# Map words to indices using the Vocabulary helper.
print(sentences[0][:300])
vocab = pre.Vocabulary(sentences[0])
index2word_map = vocab.index2word
word2index_map = vocab._dict
vocabulary_size = len(index2word_map)
# Generate skip-gram (target, context) pairs with a one-word window on each
# side; e.g. the token sequence "a b c" yields the pairs (b, a) and (b, c).
skip_gram_pairs = []
for sent in sentences:
tokenized_sent = sent.lower().split()
for i in range(1, len(tokenized_sent)-1):
word_context_pair = [[word2index_map[tokenized_sent[i-1]],
word2index_map[tokenized_sent[i+1]]],
word2index_map[tokenized_sent[i]]]
skip_gram_pairs.append([word_context_pair[1],
word_context_pair[0][0]])
skip_gram_pairs.append([word_context_pair[1],
word_context_pair[0][1]])
def get_skipgram_batch(batch_size):
instance_indices = list(range(len(skip_gram_pairs)))
np.random.shuffle(instance_indices)
batch = instance_indices[:batch_size]
x = [skip_gram_pairs[i][0] for i in batch]
y = [[skip_gram_pairs[i][1]] for i in batch]
return x, y
# batch example
x_batch, y_batch = get_skipgram_batch(8)
x_batch
y_batch
[index2word_map[word] for word in x_batch]
[index2word_map[word[0]] for word in y_batch]
# Input data, labels
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
# Embedding lookup table currently only implemented in CPU
with tf.name_scope("embeddings"):
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_dimension],
-1.0, 1.0), name='embedding')
    # This is essentially a lookup table
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Create variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_dimension],
stddev=1.0 / math.sqrt(embedding_dimension)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, inputs=embed, labels=train_labels,
num_sampled=negative_samples, num_classes=vocabulary_size))
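# NCE approximates the full softmax: each step contrasts the true target word
# against `negative_samples` randomly drawn noise words, which keeps the
# per-step cost independent of the vocabulary size.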
tf.summary.scalar("NCE_loss", loss)
# Learning rate decay
global_step = tf.Variable(0, trainable=False)
learningRate = tf.train.exponential_decay(learning_rate=0.1,
                                          global_step=global_step,
                                          decay_steps=1000,
                                          decay_rate=0.95,
                                          staircase=True)
# Pass global_step to minimize() so each training step increments it;
# otherwise the decay schedule never advances.
train_step = tf.train.GradientDescentOptimizer(learningRate).minimize(
    loss, global_step=global_step)
merged = tf.summary.merge_all()
TRAIN = True
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter(LOG_DIR,
graph=tf.get_default_graph())
with open(os.path.join(LOG_DIR, 'metadata.tsv'), "w") as metadata:
metadata.write('Name\tClass\n')
for k, v in index2word_map.items():
metadata.write('%s\t%d\n' % (v, k))
    # If a checkpoint file already exists, skip further training in this run.
    # Note: no tf.train.Saver().restore() call is made here, so previously
    # trained weights are not actually reloaded into the session.
    if glob.glob(LOG_DIR + '/*.meta'):
        TRAIN = False
        # global_step = sess.run(global_step)
        print("Restoring an old model and training it further..")
    else:
        print("Building model from scratch!")
        # global_step = 0
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embeddings.name
# Link this tensor to its metadata file (e.g. labels).
embedding.metadata_path = 'metadata.tsv'
projector.visualize_embeddings(train_writer, config)
if TRAIN:
global_step = 0
for epoch in range(EPOCHS):
print(f"\n\nepoch: {epoch}\n")
# epoch_steps = (int(len(skip_gram_pairs)/batch_size))
epoch_steps = 1000
for step in range(epoch_steps):
x_batch, y_batch = get_skipgram_batch(batch_size)
summary, _ = sess.run([merged, train_step],
feed_dict={train_inputs: x_batch,
train_labels: y_batch})
train_writer.add_summary(summary, step + global_step)
if step % 100 == 0:
loss_value = sess.run(loss,
feed_dict={train_inputs: x_batch,
train_labels: y_batch})
print("Loss at %d/%d: %.5f" % (step, epoch_steps, loss_value))
global_step += epoch_steps
# Normalize embeddings before using
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
normalized_embeddings = embeddings / norm
normalized_embeddings_matrix = sess.run(normalized_embeddings)
ref_word = normalized_embeddings_matrix[word2index_map["hate"]]
###Output
scantron ; aw ; surely ; shining ; something ; 88 ; trined ; right ; electric ; coasts ; coast ; togine ; marriage ; marriages ; 501 ; wizardry ; wizard ; wizard ; ay ; is ; ; ; yeah you know what this is nyc ; the triumphant return rakim allah ; rakim ; remember being introduced to rapping your fir
Restoring an old model and training it further..
###Markdown
Now, with our pretrained embedding matrix, we can check how related the words are.
###Code
def get_closest(word):
    # Because the embeddings are L2-normalized, a dot product equals cosine
    # similarity; sort descending and print the ten nearest words.
    ref_word = normalized_embeddings_matrix[word2index_map[word]]
    cosine_dists = np.dot(normalized_embeddings_matrix, ref_word)
    ff = np.argsort(cosine_dists)[::-1][0:10]
    for f in ff:
        print(index2word_map[f])
        print(cosine_dists[f])
get_closest("call")
get_closest("death")
get_closest("gun")
get_closest("pistol")
###Output
pistol
1.0000001
experiment
0.9432442
hundred
0.9354329
rhymefumble
0.9113585
capacities
0.8776408
dominant
0.86174405
enter
0.8565933
conceited
0.85169804
gomars
0.84910536
quatro
0.83368963
|
tirgul3.ipynb | ###Markdown
# Tirgul 3

Add nice equations using LaTeX syntax in Markdown. Sphere and quadratic equations:

$ x^2 + y^2 = z^2 $

$ x_{1,2} = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a} $

Master chef: https://www.facebook.com/MicrosoftRnDil/videos/1090576338109340/
###Code
# get file from web
import pandas as pd
url = "https://data.humdata.org/hxlproxy/api/data-preview.csv?url=https%3A%2F%2Fraw.githubusercontent.com%2FCSSEGISandData%2FCOVID-19%2Fmaster%2Fcsse_covid_19_data%2Fcsse_covid_19_time_series%2Ftime_series_covid19_deaths_global.csv"
cuvid19Deaths = pd.read_csv(url)
cuvid19Deaths.head()
# get Israel data
cuvid19Deaths[cuvid19Deaths['Country/Region'].str.contains("Israel")]
data = pd.read_csv('titanic_short.csv')
data.head()
data['age']
# is null - isna
dataIsNull = pd.isnull(data['age'])
dataIsNull
# isna is exactly the same as isnull (kept for historical reasons related to the R language)
dataIsna = pd.isna(data['age'])
dataIsna
# we can present the data at these null rows
filteredData = data[pd.isna(data['age'])]
filteredData
# What if we want only the rows with no nulls?
# We could use the ~ (negation) operator:
filteredData = data[~pd.isna(data['age'])]
filteredData.head(13)
# or the notnull command
filteredData = data[pd.notnull(data['age'])]
filteredData.head(13)
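# A sketch of an equivalent, more idiomatic alternative (an addition, not in
# the original tirgul): drop rows whose 'age' is missing in a single call.
filteredData = data.dropna(subset=['age'])
filteredData.head(13)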
# Mean
dataAge = filteredData['age']
dataAge.mean()
dataAge.min()
dataAge.max()
dataAge.median()
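# A compact alternative (an addition, not in the original tirgul): describe()
# reports count, mean, std, min, quartiles and max in one call.
dataAge.describe()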
###Output
_____no_output_____
###Markdown
## Numpy

Matrices and much more, this is where the fun begins..

[Numpy official docs](https://numpy.org/doc/stable/reference/index.html)
###Code
import numpy as np
arr = np.array([1, 2, 3, 4, 5])
print(arr*arr)
#### vector multiplication (AKA dot product)
print("arr.dot(arr)\t=",arr.dot(arr))
# or by
print("arr@arr\t\t=",arr@arr)
# create ones matrix of specific size:
np.ones((2,3))
# self exercise: create a zeros matrix of size 4x3:
np.zeros((4,3))
# The I matrix
d = np.eye(3)
print(d)
# self exercise: create the following 3X3 diagonal matrix
# [1., 0., 0.],
# [0., 2., 0.],
# [0., 0., 3.]
print([1,2,3]*np.eye(3))
# Another Way
print(np.diag([1,2,3]))
# random
e = np.random.random((2,4))
print(e)
# append
arr1 = np.array([1, 2, 3, 4, 5])
arr2 = np.array([11, 12, 13, 14, 15])
arr3 = np.append(arr1,arr2)
print(arr3)
# we can add arrays of same shape
print(arr1+arr2)
# However, arrays must have the same shape
arr3 = np.array([1,2])
print(arr1+arr3) # ERROR
# self exercise: Assume a hypothetical case where, for each student, you wish
# to calculate the average of the highest 5 grades out of 6 assignments
# step 1. generate 10x6 random integers between 70 to 100 to represent students' grades
# step 2. calculate the mean of the top 5 grades for each student
numCols = 6
grades = np.random.randint(70,100,size=(10,numCols))
print(grades)
top5Avg = (grades.sum(axis=1)-grades.min(axis=1)) / (numCols-1)
top5Avg
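# A sketch of an alternative (an addition, not in the original tirgul) that
# generalizes to dropping more than one low grade: sort each row ascending,
# discard the first column (the minimum), and average the rest.
top5AvgAlt = np.sort(grades, axis=1)[:, 1:].mean(axis=1)
print(top5AvgAlt)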
top5Avg = top5Avg.reshape(len(top5Avg),1) # for display purpose
print(top5Avg)
###Output
[[83. ]
[88.4]
[88. ]
[82.8]
[79.6]
[88.6]
[89.2]
[86.2]
[85.8]
[86.2]]
###Markdown
# Tirgul 3

Add nice equations using LaTeX syntax in Markdown. Sphere and quadratic equations:

$ x^2 + y^2 = z^2 $

$ x_{1,2} = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a} $

Master chef: https://www.facebook.com/MicrosoftRnDil/videos/1090576338109340/
###Code
# get file from web
import pandas as pd
url = "https://data.humdata.org/hxlproxy/api/data-preview.csv?url=https%3A%2F%2Fraw.githubusercontent.com%2FCSSEGISandData%2FCOVID-19%2Fmaster%2Fcsse_covid_19_data%2Fcsse_covid_19_time_series%2Ftime_series_covid19_deaths_global.csv"
cuvid19Deaths = pd.read_csv(url)
cuvid19Deaths.head()
# get Israel data
# cuvid19Deaths[cuvid19Deaths['Country/Region'] == "Israel"]
cuvid19Deaths[cuvid19Deaths['Country/Region'].str.contains("Is")]
cuvid19Deaths[:1].iloc[:,4:]
data = pd.read_csv('titanic_short.csv')
data.head(3)
data[['age']]
# is null - isna
dataIsNull = pd.isnull(data['age'])
data[['age']].isnull().sum()
pd.isnull(data).sum()
# isna is exactly the same as isnull (kept for historical reasons related to the R language)
dataIsna = pd.isna(data['age'])
dataIsna
# we can present the data at these null rows
filteredData = data[pd.notna(data['age'])]
filteredData
# What if we want only the rows matching the negation of a condition?
# We could use the ~ (negation) operator, here on a boolean condition:
filteredData = data[~(data['age']>4)]
filteredData.head(13)
# or the notnull command
filteredData = data[pd.notnull(data['age'])]
filteredData.head(13)
data['age'].mean()
# Mean
dataAge = filteredData['age']
dataAge
dataAge.mean()
dataAge.min()
dataAge.max()
dataAge.median()
###Output
_____no_output_____
###Markdown
## Numpy

Matrices and much more, this is where the fun begins..

[Numpy official docs](https://numpy.org/doc/stable/reference/index.html)
###Code
import numpy as np
arr = np.array([[1, 2, 3, 4, 5]])
# arr = np.array([
# [1, 2, 3],
# [4, 5, 6],
# [7, 8, 9]])
print(arr.shape)
print(arr)
arr.T @ arr
# print(arr.shape)
# print(arr*arr)
# arr_t = arr.T
# arr @ (arr_t)
# arr.T.dot(arr)
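# A short contrast (an addition, not in the original tirgul): for the 1x5 row
# vector above, arr.T @ arr is the 5x5 outer product, while arr @ arr.T is a
# 1x1 matrix holding the dot product.
print((arr.T @ arr).shape)  # (5, 5)
print((arr @ arr.T).shape)  # (1, 1)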
#### vector multiplication (AKA dot product)
# note: with the 2-D row vector defined above, arr.dot(arr) raises a
# ValueError, so switch back to a 1-D array for the plain dot product.
arr = np.array([1, 2, 3, 4, 5])
print("arr.dot(arr)\t=", arr.dot(arr))
# or by
print("arr@arr\t\t=", arr@arr)
# create ones matrix of specific size:
np.ones((2,3,),dtype=int)*4
# self exercise: create a zeros matrix of size 4x3:
np.zeros((4,3))
# The I matrix
d = np.eye(2)
print(d)
np.array([1,2,3])*np.eye(3)
# self exercise: create the following 3X3 diagonal matrix
# [1., 0., 0.],
# [0., 2., 0.],
# [0., 0., 3.]
print([1,2,3]*np.eye(3))
# Another Way
print(np.diag([1,2,3,5]))
# random
e = np.random.randint(low=0,high=10,size=(2,4))
print(e)
# append
arr1 = np.array([1, 2, 3, 4, 5])
arr2 = np.array([11, 12, 13, 14, 15])
arr3 = np.append(arr1,arr2)
print(arr3)
# we can add arrays of same shape
print(arr1+arr2)
# However, arrays must have the same shape
arr3 = np.array([1,2])
print(arr1+arr3) # ERROR
# self exercise: Assume a hypothetical case where, for each student, you wish
# to calculate the average of the highest 5 grades out of 6 assignments
# step 1. generate 10x6 random integers between 70 to 100 to represent students' grades
# step 2. calculate the mean of the top 5 grades for each student
numCols = 6
grades = np.random.randint(70,100,size=(10,numCols))
print(grades)
top5Avg = (grades.sum(axis=1)-grades.min(axis=1)) / (numCols-1)
top5Avg
top5Avg = top5Avg.reshape(len(top5Avg),1) # for display purpose
print(top5Avg)
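# Another sketch (an addition, not in the original tirgul): np.partition only
# moves the row minimum to index 0 instead of fully sorting, which is cheaper
# for wide rows; the mean of the remaining columns is order-independent.
top5AvgPart = np.partition(grades, 0, axis=1)[:, 1:].mean(axis=1)
print(top5AvgPart)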
a = np.random.randint(0,100,(10,5))
a = np.array([
[1,2,3,4],
[5,6,7,8]
])
a.mean(axis=0)
###Output
_____no_output_____ |
tutorials/time-series-visualization-with-altair/tutorial.ipynb | ###Markdown
# Time Series Visualization with Altair

Author: jdbcode

This tutorial provides methods for generating time series data in Earth Engine and visualizing it with the [Altair](https://altair-viz.github.io/) library, using drought and vegetation response as an example. Topics include:

- Time series region reduction in Earth Engine
- Formatting a table in Earth Engine
- Transferring an Earth Engine table to a Colab Python kernel
- Converting an Earth Engine table to a [pandas](https://pandas.pydata.org/) DataFrame
- Data representation with various Altair chart types

**Note** that this tutorial uses the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) in a [Colab notebook](https://developers.google.com/earth-engine/python_install-colab.html).

## Context

At the heart of this tutorial is the notion of data reduction and the need to transform data into insights to help inform our understanding of Earth processes and humans' role in them. It combines a series of technologies, each best suited to a particular task in the data reduction process. **Earth Engine** is used to access, clean, and reduce large amounts of spatiotemporal data, **pandas** is used to analyze and organize the results, and **Altair** is used to visualize the results.

**Note**: This notebook demonstrates an analysis template and interactive workflow that is appropriate for a certain size of dataset, but there are limitations to interactive computation time and server-to-client data transfer size imposed by Colab and Earth Engine. To analyze even larger datasets, you may need to modify the workflow to [export](https://developers.google.com/earth-engine/python_install#exporting-data) `FeatureCollection` results from Earth Engine as static assets and then use the static assets to perform the subsequent steps involving Earth Engine table formatting, conversion to pandas DataFrame, and charting with Altair.

## Materials

### Datasets

Climate

- Drought severity ([PDSI](https://developers.google.com/earth-engine/datasets/catalog/IDAHO_EPSCOR_PDSI))
- Historical climate ([PRISM](https://developers.google.com/earth-engine/datasets/catalog/OREGONSTATE_PRISM_AN81m))
- Projected climate ([NEX-DCP30](https://developers.google.com/earth-engine/datasets/catalog/NASA_NEX-DCP30))

Vegetation proxies

- NDVI ([MODIS](https://developers.google.com/earth-engine/datasets/catalog/MODIS_006_MOD13A2))
- NBR ([Landsat](https://developers.google.com/earth-engine/datasets/catalog/landsat/))

### Region of interest

The region of interest for these examples is the Sierra Nevada ecoregion of California. The vegetation grades from mostly ponderosa pine and Douglas-fir at low elevations on the western side, to pines and Sierra juniper on the eastern side, and to fir and other conifers at higher elevations.

## General workflow

Preparation of every dataset for visualization follows the same basic steps:

1. Filter the dataset (server-side Earth Engine)
2. Reduce the data region by a statistic (server-side Earth Engine)
3. Format the region reduction into a table (server-side Earth Engine)
4. Convert the Earth Engine table to a DataFrame (server-side Earth Engine > client-side Python kernel)
5. Alter the DataFrame (client-side pandas)
6. Plot the DataFrame (client-side Altair)

The first dataset will walk through each step in detail. Following examples will provide less description, unless there is variation that merits note.

## Python setup

### Earth Engine API

1. Import the Earth Engine library.
2. Authenticate access (registration verification and Google account access).
3. Initialize the API.
###Code
import ee
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
### Other libraries

Import other libraries used in this notebook.

- [**pandas**](https://pandas.pydata.org/): data analysis (including the [DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) data structure)
- [**altair**](https://altair-viz.github.io/): declarative visualization library (used for charting)
- [**numpy**](https://numpy.org/): array-processing package (used for linear regression)
- [**folium**](https://python-visualization.github.io/folium/): interactive web map
###Code
import pandas as pd
import altair as alt
import numpy as np
import folium
###Output
_____no_output_____
###Markdown
### Region reduction function

Reduction of pixels intersecting the region of interest to a statistic will be performed multiple times. Define a reusable function that can perform the task for each dataset. The function accepts arguments such as scale and reduction method to parameterize the operation for each particular analysis.

**Note**: most of the reduction operations in this tutorial use a large pixel scale so that operations complete quickly. In your own application, set the scale and other parameter arguments as you wish.
###Code
def create_reduce_region_function(geometry,
reducer=ee.Reducer.mean(),
scale=1000,
crs='EPSG:4326',
bestEffort=True,
maxPixels=1e13,
tileScale=4):
"""Creates a region reduction function.
Creates a region reduction function intended to be used as the input function
to ee.ImageCollection.map() for reducing pixels intersecting a provided region
to a statistic for each image in a collection. See ee.Image.reduceRegion()
documentation for more details.
Args:
geometry:
An ee.Geometry that defines the region over which to reduce data.
reducer:
Optional; An ee.Reducer that defines the reduction method.
scale:
Optional; A number that defines the nominal scale in meters of the
projection to work in.
crs:
Optional; An ee.Projection or EPSG string ('EPSG:5070') that defines
the projection to work in.
bestEffort:
Optional; A Boolean indicator for whether to use a larger scale if the
geometry contains too many pixels at the given scale for the operation
to succeed.
maxPixels:
Optional; A number specifying the maximum number of pixels to reduce.
tileScale:
Optional; A number representing the scaling factor used to reduce
aggregation tile size; using a larger tileScale (e.g. 2 or 4) may enable
computations that run out of memory with the default.
Returns:
A function that accepts an ee.Image and reduces it by region, according to
the provided arguments.
"""
def reduce_region_function(img):
"""Applies the ee.Image.reduceRegion() method.
Args:
img:
An ee.Image to reduce to a statistic by region.
Returns:
An ee.Feature that contains properties representing the image region
reduction results per band and the image timestamp formatted as
milliseconds from Unix epoch (included to enable time series plotting).
"""
stat = img.reduceRegion(
reducer=reducer,
geometry=geometry,
scale=scale,
crs=crs,
bestEffort=bestEffort,
maxPixels=maxPixels,
tileScale=tileScale)
return ee.Feature(geometry, stat).set({'millis': img.date().millis()})
return reduce_region_function
###Output
_____no_output_____
###Markdown
### Formatting

The result of the region reduction function above applied to an `ee.ImageCollection` produces an `ee.FeatureCollection`. This data needs to be transferred to the Python kernel, but serialized feature collections are large and awkward to deal with. This step defines a function to convert the feature collection to an `ee.Dictionary` where the keys are feature property names and values are corresponding lists of property values, which `pandas` can deal with handily.

1. Extract the property values from the `ee.FeatureCollection` as a list of lists stored in an `ee.Dictionary` using `reduceColumns()`.
2. Extract the list of lists from the dictionary.
3. Add names to each list by converting to an `ee.Dictionary` where keys are property names and values are the corresponding value lists.

The returned `ee.Dictionary` is essentially a table, where keys define columns and list elements define rows.
###Code
# Define a function to transfer feature properties to a dictionary.
def fc_to_dict(fc):
prop_names = fc.first().propertyNames()
prop_lists = fc.reduceColumns(
reducer=ee.Reducer.toList().repeat(prop_names.size()),
selectors=prop_names).get('list')
return ee.Dictionary.fromLists(prop_names, prop_lists)
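# For intuition (an illustrative note; property names depend on the input
# collection), the transferred dictionary has the form:
# {'millis': [...], 'pdsi': [...], 'system:index': [...]}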
###Output
_____no_output_____
###Markdown
## Drought severity

In this section we'll look at a time series of drought severity as a calendar heatmap and a bar chart.

### Import data

1. Load the gridded Palmer Drought Severity Index (PDSI) data as an `ee.ImageCollection`.
2. Load the EPA Level-3 ecoregion boundaries as an `ee.FeatureCollection` and filter it to include only the Sierra Nevada region, which defines the area of interest (AOI).
###Code
pdsi = ee.ImageCollection('IDAHO_EPSCOR/PDSI')
aoi = ee.FeatureCollection('EPA/Ecoregions/2013/L3').filter(
ee.Filter.eq('na_l3name', 'Sierra Nevada')).geometry()
###Output
_____no_output_____
###Markdown
**Note**: the `aoi` defined above will be used throughout this tutorial. In your own application, redefine it for your own area of interest.

### Reduce data

1. Create a region reduction function.
2. Map the function over the `pdsi` image collection to reduce each image.
3. Filter out any resulting features that have null computed values (occurs when all pixels in an AOI are masked).
###Code
reduce_pdsi = create_reduce_region_function(
geometry=aoi, reducer=ee.Reducer.mean(), scale=5000, crs='EPSG:3310')
pdsi_stat_fc = ee.FeatureCollection(pdsi.map(reduce_pdsi)).filter(
ee.Filter.notNull(pdsi.first().bandNames()))
###Output
_____no_output_____
###Markdown
---

**STOP**: _Optional export_

_If your process is long-running_, you'll want to export the `pdsi_stat_fc` variable as an asset using a batch task. Wait until the task finishes, import the asset, and continue on. Please see the Developer Guide section on [exporting with the Python API](https://developers.google.com/earth-engine/python_install#exporting-data).

Export to asset:
###Code
"""
task = ee.batch.Export.table.toAsset(
collection=pdsi_stat_fc,
description='pdsi_stat_fc export',
assetId='users/YOUR_USER_NAME/pdsi_stat_fc_ts_vis_with_altair')
task.start()
"""
###Output
_____no_output_____
###Markdown
Import the asset after the export completes:
###Code
"""
pdsi_stat_fc = ee.FeatureCollection('users/YOUR_USER_NAME/pdsi_stat_fc_ts_vis_with_altair')
"""
###Output
_____no_output_____
###Markdown
_\* Remove the triple quote comment fence to run the above cells._

---

**CONTINUE**: Server to client transfer

The `ee.FeatureCollection` needs to be converted to a dictionary and transferred to the Python kernel.

1. Apply the `fc_to_dict` function to convert from `ee.FeatureCollection` to `ee.Dictionary`.
2. Call `getInfo()` on the `ee.Dictionary` to transfer the data client-side.
###Code
pdsi_dict = fc_to_dict(pdsi_stat_fc).getInfo()
###Output
_____no_output_____
###Markdown
The result is a Python dictionary. Print a small part to see how it is formatted.
###Code
print(type(pdsi_dict), '\n')
for prop in pdsi_dict.keys():
print(prop + ':', pdsi_dict[prop][0:3] + ['...'])
###Output
_____no_output_____
###Markdown
Convert the Python dictionary to a pandas DataFrame.
###Code
pdsi_df = pd.DataFrame(pdsi_dict)
###Output
_____no_output_____
###Markdown
Preview the DataFrame and check the column data types.
###Code
display(pdsi_df)
print(pdsi_df.dtypes)
###Output
_____no_output_____
###Markdown
### Add date columns

Add date columns derived from the milliseconds from Unix epoch column. The pandas library provides functions and objects for timestamps, and the DataFrame object allows for easy mutation.

Define a function to add date variables to the DataFrame: year, month, day, and day of year (DOY).
###Code
# Function to add date variables to DataFrame.
def add_date_info(df):
df['Timestamp'] = pd.to_datetime(df['millis'], unit='ms')
df['Year'] = pd.DatetimeIndex(df['Timestamp']).year
df['Month'] = pd.DatetimeIndex(df['Timestamp']).month
df['Day'] = pd.DatetimeIndex(df['Timestamp']).day
df['DOY'] = pd.DatetimeIndex(df['Timestamp']).dayofyear
return df
###Output
_____no_output_____
###Markdown
**Note**: the above function for adding date information to a DataFrame will be used throughout this tutorial.

Apply the `add_date_info` function to the PDSI DataFrame to add date attribute columns and preview the results.
###Code
pdsi_df = add_date_info(pdsi_df)
pdsi_df.head(5)
###Output
_____no_output_____
###Markdown
### Rename and drop columns

Often it is desirable to rename columns and/or remove unnecessary columns. Do both here and preview the DataFrame.
###Code
pdsi_df = pdsi_df.rename(columns={
'pdsi': 'PDSI'
}).drop(columns=['millis', 'system:index'])
pdsi_df.head(5)
###Output
_____no_output_____
###Markdown
Check the data type of each column.
###Code
pdsi_df.dtypes
###Output
_____no_output_____
###Markdown
At this point the DataFrame is in good shape for charting with Altair.

### Calendar heatmap

Chart the PDSI data as a calendar heatmap. Set observation year as the x-axis variable, month as the y-axis, and PDSI value as color.

Note that Altair features a convenient [method for aggregating values within groups](https://altair-viz.github.io/user_guide/transform/aggregate.html) while encoding the chart (i.e., no need to create a new DataFrame). The mean aggregate transform is applied here because each month has three PDSI observations (year and month are the grouping factors).

Also note that a tooltip has been added to the chart; hovering over cells reveals the values of the selected variables.
###Code
alt.Chart(pdsi_df).mark_rect().encode(
x='Year:O',
y='Month:O',
color=alt.Color(
'mean(PDSI):Q', scale=alt.Scale(scheme='redblue', domain=(-5, 5))),
tooltip=[
alt.Tooltip('Year:O', title='Year'),
alt.Tooltip('Month:O', title='Month'),
alt.Tooltip('mean(PDSI):Q', title='PDSI')
]).properties(width=600, height=300)
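# Side note (an addition, not part of the original tutorial): an Altair chart
# object can also be written to a standalone HTML file for sharing.
heatmap = alt.Chart(pdsi_df).mark_rect().encode(
    x='Year:O',
    y='Month:O',
    color=alt.Color(
        'mean(PDSI):Q', scale=alt.Scale(scheme='redblue', domain=(-5, 5))))
heatmap.save('pdsi_heatmap.html')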
###Output
_____no_output_____
###Markdown
The calendar heatmap is good for interpreting relative intra- and inter-annual differences in PDSI. However, since the PDSI variable is represented by color, estimating absolute values and the magnitude of differences is difficult.

### Bar chart

Chart the PDSI time series as a bar chart to more easily interpret absolute values and compare them over time. Here, the observation timestamp is represented on the x-axis and PDSI is represented by both the y-axis and color. Since each PDSI observation has a unique timestamp that can be plotted to the x-axis, there is no need to aggregate PDSI values as in the above chart. A tooltip is added to the chart; hover over the bars to reveal the values for each variable.
###Code
alt.Chart(pdsi_df).mark_bar(size=1).encode(
x='Timestamp:T',
y='PDSI:Q',
color=alt.Color(
'PDSI:Q', scale=alt.Scale(scheme='redblue', domain=(-5, 5))),
tooltip=[
alt.Tooltip('Timestamp:T', title='Date'),
alt.Tooltip('PDSI:Q', title='PDSI')
]).properties(width=600, height=300)
###Output
_____no_output_____
###Markdown
This temporal bar chart makes it easier to interpret and compare absolute values of PDSI over time, but relative intra- and inter-annual variability is arguably harder to interpret because the division of year and month is not as distinct as in the calendar heatmap above.

Take note of the extended and severe period of drought from 2012 through 2016. In the next section, we'll look for a vegetation response to this event.

## Vegetation productivity

NDVI is a proxy measure of photosynthetic capacity and is used in this tutorial to investigate vegetation response to the 2012-2016 drought identified in the PDSI bar chart above.

MODIS provides an analysis-ready 16-day NDVI composite that is well suited for regional investigation of temporal vegetation dynamics. The following steps reduce and prepare this data for charting in the same manner as the PDSI data above; please refer to previous sections to review details.

### Import and reduce

1. Load the MODIS NDVI data as an `ee.ImageCollection`.
2. Create a region reduction function.
3. Apply the function to all images in the time series.
4. Filter out features with null computed values.
###Code
ndvi = ee.ImageCollection('MODIS/006/MOD13A2').select('NDVI')
reduce_ndvi = create_reduce_region_function(
geometry=aoi, reducer=ee.Reducer.mean(), scale=1000, crs='EPSG:3310')
ndvi_stat_fc = ee.FeatureCollection(ndvi.map(reduce_ndvi)).filter(
ee.Filter.notNull(ndvi.first().bandNames()))
###Output
_____no_output_____
###Markdown
---

**STOP**: _If your process is long-running_, you'll want to export the `ndvi_stat_fc` variable as an asset using a batch task. Wait until the task finishes, import the asset, and continue on. Please see the above **_Optional export_** section for more details.

**CONTINUE**:

---

### Prepare DataFrame

1. Transfer data from the server to the client.
2. Convert the Python dictionary to a pandas DataFrame.
3. Preview the DataFrame and check data types.
###Code
ndvi_dict = fc_to_dict(ndvi_stat_fc).getInfo()
ndvi_df = pd.DataFrame(ndvi_dict)
display(ndvi_df)
print(ndvi_df.dtypes)
###Output
_____no_output_____
###Markdown
4. Remove the NDVI scaling.
5. Add date attribute columns.
6. Preview the DataFrame.
###Code
ndvi_df['NDVI'] = ndvi_df['NDVI'] / 10000
ndvi_df = add_date_info(ndvi_df)
ndvi_df.head(5)
###Output
_____no_output_____
###Markdown
These NDVI time series data are now ready for plotting.

### DOY line chart

Make a day of year (DOY) line chart where each line represents a year of observations. This chart makes it possible to compare the same observation date among years. Use it to compare NDVI values for drought years versus non-drought years.

Day of year is represented on the x-axis and NDVI on the y-axis. Each line represents a year and is distinguished by color. Note that this plot includes a tooltip and has been made interactive so that the axes can be zoomed and panned.
###Code
highlight = alt.selection(
type='single', on='mouseover', fields=['Year'], nearest=True)
base = alt.Chart(ndvi_df).encode(
x=alt.X('DOY:Q', scale=alt.Scale(domain=[0, 353], clamp=True)),
y=alt.Y('NDVI:Q', scale=alt.Scale(domain=[0.1, 0.6])),
color=alt.Color('Year:O', scale=alt.Scale(scheme='magma')))
points = base.mark_circle().encode(
opacity=alt.value(0),
tooltip=[
alt.Tooltip('Year:O', title='Year'),
alt.Tooltip('DOY:Q', title='DOY'),
alt.Tooltip('NDVI:Q', title='NDVI')
]).add_selection(highlight)
lines = base.mark_line().encode(
size=alt.condition(~highlight, alt.value(1), alt.value(3)))
(points + lines).properties(width=600, height=350).interactive()
###Output
_____no_output_____
###Markdown
The first thing to note is that winter dates (when there is snow in the Sierra Nevada ecoregion) exhibit highly variable inter-annual NDVI, but spring, summer, and fall dates are more consistent. With regard to drought effects on vegetation, summer and fall dates are the most sensitive time. Zooming into observations for the summer/fall days (224-272), you'll notice that many years have a u-shaped pattern where NDVI values decrease and then rise.

Another way to view these data is to plot the distribution of NDVI by DOY, represented as an interquartile range envelope and median line. Here, these two charts are defined and then combined in the following snippet.

1. Define a base chart.
2. Define a line chart for median NDVI (note the use of the aggregate median transform, grouping by DOY).
3. Define a band chart using `'iqr'` (interquartile range) to represent the NDVI distribution, grouping on DOY.
4. Combine the line and band charts.
###Code
base = alt.Chart(ndvi_df).encode(
x=alt.X('DOY:Q', scale=alt.Scale(domain=(150, 340))))
line = base.mark_line().encode(
y=alt.Y('median(NDVI):Q', scale=alt.Scale(domain=(0.47, 0.53))))
band = base.mark_errorband(extent='iqr').encode(
y='NDVI:Q')
(line + band).properties(width=600, height=300).interactive()
###Output
_____no_output_____
###Markdown
The summary statistics for the summer/fall days (224-272) certainly show an NDVI reduction, but there is also variability; some years exhibit greater NDVI reduction than others, as suggested by the wide interquartile range during the middle of the summer. Assuming that NDVI reduction is due to water and heat limiting photosynthesis, we can hypothesize that during years of drought, photosynthesis (NDVI) will be lower than in non-drought years. We can investigate the relationship between photosynthesis (NDVI) and drought (PDSI) using a scatter plot and linear regression.

## Drought and productivity relationship

A scatterplot is a good way to visualize the relationship between two variables. Here, PDSI (drought indicator) will be plotted on the x-axis and NDVI (vegetation productivity) on the y-axis. To achieve this, both variables must exist in the same DataFrame. Each row will be an observation in time and columns will correspond to PDSI and NDVI values. Currently, PDSI and NDVI are in two different DataFrames and need to be merged.

### Prepare DataFrames

Before they can be merged, each variable must be reduced to a common temporal observation unit to define correspondence. There are a number of ways to do this and each will define the relationship between PDSI and NDVI differently. Here, our temporal unit will be an annual observation set where NDVI is reduced to the intra-annual minimum from DOY 224 to 272, and PDSI will be the mean from DOY 1 to 272. We are proposing that average drought severity for the first three quarters of a year is related to minimum summer NDVI for a given year.

1. Filter the NDVI DataFrame to observations that occur between DOY 224 and 272.
2. Reduce the DOY-filtered subset to intra-annual minimum NDVI.
###Code
ndvi_doy_range = [224, 272]
ndvi_df_sub = ndvi_df[(ndvi_df['DOY'] >= ndvi_doy_range[0])
& (ndvi_df['DOY'] <= ndvi_doy_range[1])]
ndvi_df_sub = ndvi_df_sub.groupby('Year').agg('min')
###Output
_____no_output_____
###Markdown
**Note**: in your own application you may find that a different DOY range is more suitable; change the `ndvi_doy_range` as needed.

3. Filter the PDSI DataFrame to observations that occur between DOY 1 and 272.
4. Reduce the values within a given year to the mean of the observations.
###Code
pdsi_doy_range = [1, 272]
pdsi_df_sub = pdsi_df[(pdsi_df['DOY'] >= pdsi_doy_range[0])
& (pdsi_df['DOY'] <= pdsi_doy_range[1])]
pdsi_df_sub = pdsi_df_sub.groupby('Year').agg('mean')
###Output
_____no_output_____
###Markdown
**Note**: in your own application you may find that a different DOY range is more suitable; change the `pdsi_doy_range` as needed.

5. Perform a join on 'Year' to combine the two reduced DataFrames.
6. Select only the columns of interest: 'Year', 'NDVI', 'PDSI'.
7. Preview the DataFrame.
###Code
ndvi_pdsi_df = pd.merge(
ndvi_df_sub, pdsi_df_sub, how='left', on='Year').reset_index()
ndvi_pdsi_df = ndvi_pdsi_df[['Year', 'NDVI', 'PDSI']]
ndvi_pdsi_df.head(5)
###Output
_____no_output_____
###Markdown
NDVI and PDSI are now included in the same DataFrame, linked by Year. This format is suitable for determining a linear relationship and drawing a line of best fit through the data.

Including a line of best fit can be a helpful visual aid. Here, a 1D polynomial is fit through the xy point cloud defined by corresponding NDVI and PDSI observations. The resulting fit is added to the DataFrame as a new column 'Fit'.

8. Add a line of best fit between PDSI and NDVI by determining the linear relationship and predicting NDVI based on PDSI for each year.
###Code
ndvi_pdsi_df['Fit'] = np.poly1d(
np.polyfit(ndvi_pdsi_df['PDSI'], ndvi_pdsi_df['NDVI'], 1))(
ndvi_pdsi_df['PDSI'])
ndvi_pdsi_df.head(5)
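# An optional addition (not part of the original tutorial): quantify the
# strength of the linear relationship with Pearson's correlation coefficient.
r = np.corrcoef(ndvi_pdsi_df['PDSI'], ndvi_pdsi_df['NDVI'])[0, 1]
print('Pearson r = %.2f' % r)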
###Output
_____no_output_____
###Markdown
### Scatter plot

The DataFrame is ready for plotting. Since this chart is to include points and a line of best fit, two charts need to be created: one for the points and one for the line. The results are combined into the final plot.
###Code
base = alt.Chart(ndvi_pdsi_df).encode(
x=alt.X('PDSI:Q', scale=alt.Scale(domain=(-5, 5))))
points = base.mark_circle(size=60).encode(
y=alt.Y('NDVI:Q', scale=alt.Scale(domain=(0.4, 0.6))),
color=alt.Color('Year:O', scale=alt.Scale(scheme='magma')),
tooltip=[
alt.Tooltip('Year:O', title='Year'),
alt.Tooltip('PDSI:Q', title='PDSI'),
alt.Tooltip('NDVI:Q', title='NDVI')
])
fit = base.mark_line().encode(
y=alt.Y('Fit:Q'),
color=alt.value('#808080'))
(points + fit).properties(width=600, height=300).interactive()
###Output
_____no_output_____
###Markdown
As you can see, there seems to be some degree of positive correlation between PDSI and NDVI (i.e., as wetness increases, vegetation productivity increases; as wetness decreases, vegetation productivity decreases). Note that some of the greatest outliers are 2016, 2017, and 2018 - the three years following recovery from the long drought. It is also important to note that there are many other factors that may influence the NDVI signal that are not being considered here.

## Patch-level vegetation mortality

At a regional scale there appears to be a relationship between drought and vegetation productivity. This section will look more closely at the effects of drought on vegetation at a patch level, with a specific focus on mortality. Here, a Landsat time series collection is created for the period 1984-present to provide greater temporal context for change at a relatively precise spatial resolution.

### Find a point of interest

Use [aerial imagery](https://developers.google.com/earth-engine/datasets/catalog/USDA_NAIP_DOQQ) from the National Agriculture Imagery Program (NAIP) in an interactive [Folium](https://python-visualization.github.io/folium/) map to identify a location in the Sierra Nevada ecoregion that appears to have patches of dead trees.

1. Run the following code block to render an interactive Folium map for a selected NAIP image.
2. Zoom and pan around the image to identify a region of recently dead trees (standing silver snags with no fine branches, or brown/grey snags with fine branches).
3. Click the map to list the latitude and longitude for a patch of interest. Record these values for use in the following section (the example location used in the following section is presented as a yellow point).
###Code
# Define a method for displaying Earth Engine image tiles to folium map.
def add_ee_layer(self, ee_image_object, vis_params, name):
map_id_dict = ee.Image(ee_image_object).getMapId(vis_params)
folium.raster_layers.TileLayer(
tiles=map_id_dict['tile_fetcher'].url_format,
attr='Map Data © <a href="https://earthengine.google.com/">Google Earth Engine, USDA National Agriculture Imagery Program</a>',
name=name,
overlay=True,
control=True).add_to(self)
# Add an Earth Engine layer drawing method to folium.
folium.Map.add_ee_layer = add_ee_layer
# Import a NAIP image for the area and date of interest.
naip_img = ee.ImageCollection('USDA/NAIP/DOQQ').filterDate(
'2016-01-01',
'2017-01-01').filterBounds(ee.Geometry.Point([-118.6407, 35.9665])).first()
# Display the NAIP image to the folium map.
m = folium.Map(location=[35.9665, -118.6407], tiles='Stamen Terrain', zoom_start=16, height=500)
m.add_ee_layer(naip_img, None, 'NAIP image, 2016')
# Add the point of interest to the map.
folium.Circle(
radius=15,
location=[35.9665, -118.6407],
color='yellow',
fill=False,
).add_to(m)
# Add the AOI to the map.
folium.GeoJson(
aoi.getInfo(),
name='geojson',
style_function=lambda x: {'fillColor': '#00000000', 'color': '#000000'},
).add_to(m)
# Add a lat lon popup.
folium.LatLngPopup().add_to(m)
# Display the map.
display(m)
###Output
_____no_output_____
###Markdown
### Prepare Landsat collection

Landsat surface reflectance data need to be prepared before being reduced. The steps below will organize data from multiple sensors into congruent collections where band names are consistent, cloud and cloud shadows have been masked out, and the normalized burn ratio (NBR) transformation is calculated and returned as the image representative (NBR is a good indicator of forest disturbance). Finally, all sensor collections will be merged into a single collection and annual composites calculated based on mean annual NBR using a join.

1. Define Landsat observation date window inputs based on the NDVI curve plotted previously, and set latitude and longitude variables from the map above.
###Code
start_day = 224
end_day = 272
latitude = 35.9665
longitude = -118.6407
###Output
_____no_output_____
###Markdown
**Note**: in your own application it may be necessary to change these values.

2. Prepare a Landsat surface reflectance collection 1984-present. Those unfamiliar with Landsat might find the following acronym definitions and links helpful.

- [OLI](https://www.usgs.gov/land-resources/nli/landsat/landsat-8?qt-science_support_page_related_con=0#qt-science_support_page_related_con) (Landsat's Operational Land Imager sensor)
- [ETM+](https://www.usgs.gov/land-resources/nli/landsat/landsat-7?qt-science_support_page_related_con=0#qt-science_support_page_related_con) (Landsat's Enhanced Thematic Mapper Plus sensor)
- [TM](https://www.usgs.gov/land-resources/nli/landsat/landsat-5?qt-science_support_page_related_con=0#qt-science_support_page_related_con) (Landsat's Thematic Mapper sensor)
- [CFMask](https://www.usgs.gov/land-resources/nli/landsat/cfmask-algorithm) (Landsat USGS surface reflectance mask based on the CFMask algorithm)
- [NBR](https://www.usgs.gov/land-resources/nli/landsat/landsat-normalized-burn-ratio) (Normalized Burn Ratio: a spectral vegetation index)
- Understanding [Earth Engine joins](https://developers.google.com/earth-engine/joins_intro)
###Code
# Make lat. and long. vars an `ee.Geometry.Point`.
point = ee.Geometry.Point([longitude, latitude])
# Define a function to get and rename bands of interest from OLI.
def rename_oli(img):
return (img.select(
ee.List(['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'pixel_qa']),
ee.List(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa'])))
# Define a function to get and rename bands of interest from ETM+.
def rename_etm(img):
return (img.select(
ee.List(['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa']),
ee.List(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa'])))
# Define a function to mask out clouds and cloud shadows.
def cfmask(img):
  # Bits 3 and 5 of the 'pixel_qa' band flag cloud shadow and cloud.
  cloud_shadow_bit_mask = 1 << 3
  cloud_bit_mask = 1 << 5
  qa = img.select('pixel_qa')
  mask = qa.bitwiseAnd(cloud_shadow_bit_mask).eq(0).And(
      qa.bitwiseAnd(cloud_bit_mask).eq(0))
  return img.updateMask(mask)
# Define a function to add year as an image property.
def set_year(img):
year = ee.Image(img).date().get('year')
return img.set('Year', year)
# Define a function to calculate NBR.
def calc_nbr(img):
return img.normalizedDifference(ee.List(['NIR', 'SWIR2'])).rename('NBR')
# Define a function to prepare OLI images.
def prep_oli(img):
orig = img
img = rename_oli(img)
img = cfmask(img)
img = calc_nbr(img)
img = img.copyProperties(orig, orig.propertyNames())
return set_year(img)
# Define a function to prepare TM/ETM+ images.
def prep_etm(img):
orig = img
img = rename_etm(img)
img = cfmask(img)
img = calc_nbr(img)
img = img.copyProperties(orig, orig.propertyNames())
return set_year(img)
# Import image collections for each Landsat sensor (surface reflectance).
tm_col = ee.ImageCollection('LANDSAT/LT05/C01/T1_SR')
etm_col = ee.ImageCollection('LANDSAT/LE07/C01/T1_SR')
oli_col = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
# Filter collections and prepare them for merging.
oli_col = oli_col.filterBounds(point).filter(
ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_oli)
etm_col = etm_col.filterBounds(point).filter(
ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_etm)
tm_col = tm_col.filterBounds(point).filter(
ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_etm)
# Merge the collections.
landsat_col = oli_col.merge(etm_col).merge(tm_col)
# Get a distinct year collection.
distinct_year_col = landsat_col.distinct('Year')
# Define a filter that identifies which images from the complete collection
# match the year from the distinct year collection.
join_filter = ee.Filter.equals(leftField='Year', rightField='Year')
# Define a join.
join = ee.Join.saveAll('year_matches')
# Apply the join and convert the resulting FeatureCollection to an
# ImageCollection.
join_col = ee.ImageCollection(
join.apply(distinct_year_col, landsat_col, join_filter))
# Define a function to apply mean reduction among matching year collections.
def reduce_by_join(img):
year_col = ee.ImageCollection.fromImages(ee.Image(img).get('year_matches'))
return year_col.reduce(ee.Reducer.mean()).rename('NBR').set(
'system:time_start',
ee.Image(img).date().update(month=8, day=1).millis())
# Apply the `reduce_by_join` function to the list of annual images in the
# properties of the join collection.
landsat_col = join_col.map(reduce_by_join)
###Output
_____no_output_____
###Markdown
The result of the above code block is an image collection with as many images as there are years present in the merged Landsat collection. Each image represents the annual mean NBR constrained to observations within the given date window.

### Prepare DataFrame

1. Create a region reduction function; use `ee.Reducer.first()` as the reducer since no spatial aggregation is needed (we are interested in the single pixel that intersects the point). Set the region as the geometry defined by the lat. and long. coordinates identified in the above map.
2. Apply the function to all images in the time series.
3. Filter out features with null computed values.
###Code
reduce_landsat = create_reduce_region_function(
geometry=point, reducer=ee.Reducer.first(), scale=30, crs='EPSG:3310')
nbr_stat_fc = ee.FeatureCollection(landsat_col.map(reduce_landsat)).filter(
ee.Filter.notNull(landsat_col.first().bandNames()))
###Output
_____no_output_____
###Markdown
4. Transfer data from the server to the client.

*Note: if the process times out, you'll need to export/import the `nbr_stat_fc` feature collection as described in the **Optional export** section.*

5. Convert the Python dictionary to a pandas DataFrame.
6. Preview the DataFrame and check data types.
###Code
nbr_dict = fc_to_dict(nbr_stat_fc).getInfo()
nbr_df = pd.DataFrame(nbr_dict)
display(nbr_df)
print(nbr_df.dtypes)
###Output
_____no_output_____
###Markdown
7. Add date attribute columns.
8. Preview the DataFrame.
###Code
nbr_df = add_date_info(nbr_df)
nbr_df.head(5)
###Output
_____no_output_____
###Markdown
### Line chart

Display the Landsat NBR time series for the point of interest as a line plot.
###Code
alt.Chart(nbr_df).mark_line().encode(
x=alt.X('Timestamp:T', title='Date'),
y='NBR:Q',
tooltip=[
alt.Tooltip('Timestamp:T', title='Date'),
alt.Tooltip('NBR:Q')
]).properties(width=600, height=300).interactive()
###Output
_____no_output_____
###Markdown
As you can see from the above time series of NBR observations, a dramatic decrease in NBR began in 2015, shortly after the severe and extended drought began. The decline continued through 2017, when a minor recovery began. Within the context of the entire time series, it is apparent that the decline is outside of normal inter-annual variability and that the reduction in NBR for this site is quite severe. The lack of a major recovery response in NBR in 2017-19 (time of writing) indicates that the event was not ephemeral; the loss of vegetation will have a lasting impact on this site. The corresponding onset of drought and reduction in NBR provides further evidence that there is a relationship between drought and vegetation response in the Sierra Nevada ecoregion.

## Past and future climate

The previous data visualizations suggest there is a relationship between drought and vegetation stress and mortality in the Sierra Nevada ecoregion. This section will look at how climate is projected to change in the future, which can give us a sense of what to expect with regard to drought conditions and let us speculate about its impact on vegetation. We'll look at historical and projected temperature and precipitation. Projected data are represented by NEX-DCP30, and historical observations by PRISM.

### Future climate

NEX-DCP30 data contain 33 climate models projected to the year 2100 using several scenarios of greenhouse gas concentration pathways (RCPs). Here, we'll use the median of all models for RCP 8.5 (the worst-case scenario) to look at potential future temperature and precipitation.

#### Import and prepare collection

1. Filter the collection by date and scenario.
2. Calculate 'mean' temperature from the median min and max among the 33 models.
###Code
dcp_col = (ee.ImageCollection('NASA/NEX-DCP30_ENSEMBLE_STATS')
.select(['tasmax_median', 'tasmin_median', 'pr_median'])
.filter(
ee.Filter.And(ee.Filter.eq('scenario', 'rcp85'),
ee.Filter.date('2019-01-01', '2070-01-01'))))
def calc_mean_temp(img):
return (img.select('tasmax_median')
.add(img.select('tasmin_median'))
.divide(ee.Image.constant(2.0))
.addBands(img.select('pr_median'))
.rename(['Temp-mean', 'Precip-rate'])
.copyProperties(img, img.propertyNames()))
dcp_col = dcp_col.map(calc_mean_temp)
###Output
_____no_output_____
###Markdown
#### Prepare DataFrame

1. Create a region reduction function.
2. Apply the function to all images in the time series.
3. Filter out features with null computed values.
###Code
reduce_dcp30 = create_reduce_region_function(
geometry=point, reducer=ee.Reducer.first(), scale=5000, crs='EPSG:3310')
dcp_stat_fc = ee.FeatureCollection(dcp_col.map(reduce_dcp30)).filter(
ee.Filter.notNull(dcp_col.first().bandNames()))
###Output
_____no_output_____
###Markdown
4. Transfer data from the server to the client.

*Note: if the process times out, you'll need to export/import the `dcp_stat_fc` feature collection as described in the **Optional export** section.*

5. Convert the Python dictionary to a pandas DataFrame.
6. Preview the DataFrame and check the data types.
###Code
dcp_dict = fc_to_dict(dcp_stat_fc).getInfo()
dcp_df = pd.DataFrame(dcp_dict)
display(dcp_df)
print(dcp_df.dtypes)
###Output
_____no_output_____
###Markdown
7. Add date attribute columns.
8. Preview the DataFrame.
###Code
dcp_df = add_date_info(dcp_df)
dcp_df.head(5)
###Output
_____no_output_____
###Markdown
9. Convert the precipitation rate to mm.
10. Convert Kelvin to Celsius.
11. Add the model name as a column.
12. Remove the 'Precip-rate' column.
###Code
# Precipitation is a rate in kg/m^2/s (equivalent to mm/s); convert to
# mm/month (~86400 s/day * 30 days).
dcp_df['Precip-mm'] = dcp_df['Precip-rate'] * 86400 * 30
# Convert temperature from Kelvin to degrees Celsius.
dcp_df['Temp-mean'] = dcp_df['Temp-mean'] - 273.15
dcp_df['Model'] = 'NEX-DCP30'
dcp_df = dcp_df.drop(columns=['Precip-rate'])
dcp_df.head(5)
###Output
_____no_output_____
###Markdown
### Past climate

PRISM data are climate datasets for the conterminous United States. Grid cells are interpolated based on station data assimilated from many networks across the country. The datasets used here are monthly averages for precipitation and temperature. They provide a record of historical climate.

#### Reduce collection and prepare DataFrame

1. Import the collection and filter by date.
2. Reduce the collection images by region and filter null computed values.
3. Convert the feature collection to a dictionary and transfer it client-side.

*Note: if the process times out, you'll need to export/import the `prism_stat_fc` feature collection as described in the **Optional export** section.*

4. Convert the dictionary to a DataFrame.
5. Preview the DataFrame.
###Code
prism_col = (ee.ImageCollection('OREGONSTATE/PRISM/AN81m')
.select(['ppt', 'tmean'])
.filter(ee.Filter.date('1979-01-01', '2019-12-31')))
reduce_prism = create_reduce_region_function(
geometry=point, reducer=ee.Reducer.first(), scale=5000, crs='EPSG:3310')
prism_stat_fc = (ee.FeatureCollection(prism_col.map(reduce_prism))
.filter(ee.Filter.notNull(prism_col.first().bandNames())))
prism_dict = fc_to_dict(prism_stat_fc).getInfo()
prism_df = pd.DataFrame(prism_dict)
display(prism_df)
print(prism_df.dtypes)
###Output
_____no_output_____
###Markdown
6. Add date attribute columns.
7. Add the model name.
8. Rename columns to be consistent with the NEX-DCP30 DataFrame.
9. Preview the DataFrame.
###Code
prism_df = add_date_info(prism_df)
prism_df['Model'] = 'PRISM'
prism_df = prism_df.rename(columns={'ppt': 'Precip-mm', 'tmean': 'Temp-mean'})
prism_df.head(5)
###Output
_____no_output_____
###Markdown
### Combine DataFrames

At this point the PRISM and NEX-DCP30 DataFrames have the same columns, the same units, and are distinguished by unique entries in the 'Model' column. Use the `concat` function to concatenate these DataFrames into a single DataFrame for plotting together in the same chart.
###Code
climate_df = pd.concat([prism_df, dcp_df], sort=True)
climate_df
###Output
_____no_output_____
###Markdown
### Charts

Chart the past and future precipitation and temperature together to get a sense of where climate has been and where it is projected to go under RCP 8.5.

#### Precipitation
###Code
base = alt.Chart(climate_df).encode(
x='Year:O',
color='Model')
line = base.mark_line().encode(
y=alt.Y('median(Precip-mm):Q', title='Precipitation (mm/month)'))
band = base.mark_errorband(extent='iqr').encode(
y=alt.Y('Precip-mm:Q', title='Precipitation (mm/month)'))
(band + line).properties(width=600, height=300)
###Output
_____no_output_____
###Markdown
#### Temperature
###Code
line = alt.Chart(climate_df).mark_line().encode(
x='Year:O',
y='median(Temp-mean):Q',
color='Model')
band = alt.Chart(climate_df).mark_errorband(extent='iqr').encode(
x='Year:O',
y=alt.Y('Temp-mean:Q', title='Temperature (°C)'), color='Model')
(band + line).properties(width=600, height=300)
###Output
_____no_output_____
###Markdown
Time Series Visualization with Altair

Author: jdbcode

This tutorial provides methods for generating time series data in Earth Engine and visualizing it with the [Altair](https://altair-viz.github.io/) library, using drought and vegetation response as an example. Topics include:

- Time series region reduction in Earth Engine
- Formatting a table in Earth Engine
- Transferring an Earth Engine table to a Colab Python kernel
- Converting an Earth Engine table to a [pandas](https://pandas.pydata.org/) DataFrame
- Data representation with various Altair chart types

**Note** that this tutorial uses the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) in a [Colab notebook](https://developers.google.com/earth-engine/python_install-colab.html).

**Context**

At the heart of this tutorial is the notion of data reduction and the need to transform data into insights that inform our understanding of Earth processes and humans' role in them. It combines a series of technologies, each best suited to a particular task in the data reduction process. **Earth Engine** is used to access, clean, and reduce large amounts of spatiotemporal data, **pandas** is used to analyze and organize the results, and **Altair** is used to visualize the results.

**Note**: This notebook demonstrates an analysis template and interactive workflow that is appropriate for a certain size of dataset, but there are limitations to interactive computation time and server-to-client data transfer size imposed by Colab and Earth Engine. To analyze even larger datasets, you may need to modify the workflow to [export](https://developers.google.com/earth-engine/python_install#exporting-data) `FeatureCollection` results from Earth Engine as static assets and then use the static assets to perform the subsequent steps involving Earth Engine table formatting, conversion to pandas DataFrame, and charting with Altair.

**Materials**

Datasets

Climate
- Drought severity ([PDSI](https://developers.google.com/earth-engine/datasets/catalog/IDAHO_EPSCOR_PDSI))
- Historical climate ([PRISM](https://developers.google.com/earth-engine/datasets/catalog/OREGONSTATE_PRISM_AN81m))
- Projected climate ([NEX-DCP30](https://developers.google.com/earth-engine/datasets/catalog/NASA_NEX-DCP30))

Vegetation proxies
- NDVI ([MODIS](https://developers.google.com/earth-engine/datasets/catalog/MODIS_006_MOD13A2))
- NBR ([Landsat](https://developers.google.com/earth-engine/datasets/catalog/landsat/))

Region of interest

The region of interest for these examples is the Sierra Nevada ecoregion of California. The vegetation grades from mostly ponderosa pine and Douglas-fir at low elevations on the western side, to pines and Sierra juniper on the eastern side, and to fir and other conifers at higher elevations.

**General workflow**

Preparation of every dataset for visualization follows the same basic steps:

1. Filter the dataset (server-side Earth Engine)
2. Reduce the data region by a statistic (server-side Earth Engine)
3. Format the region reduction into a table (server-side Earth Engine)
4. Convert the Earth Engine table to a DataFrame (server-side Earth Engine > client-side Python kernel)
5. Alter the DataFrame (client-side pandas)
6. Plot the DataFrame (client-side Altair)

The first dataset walks through each step in detail; subsequent examples provide less description, except where a variation merits note.

**Python setup**

Earth Engine API

1. Import the Earth Engine library.
2. Authenticate access (registration verification and Google account access).
3. Initialize the API.
###Code
import ee
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Other librariesImport other libraries used in this notebook.- [**pandas**](https://pandas.pydata.org/): data analysis (including the [DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) data structure)- [**altair**](https://altair-viz.github.io/): declarative visualization library (used for charting)- [**numpy**](https://numpy.org/): array-processing package (used for linear regression)- [**folium**](https://python-visualization.github.io/folium/): interactive web map
###Code
import pandas as pd
import altair as alt
import numpy as np
import folium
###Output
_____no_output_____
###Markdown
Region reduction functionReduction of pixels intersecting the region of interest to a statistic will be performed multiple times. Define a reusable function that can perform the task for each dataset. The function accepts arguments such as scale and reduction method to parameterize the operation for each particular analysis.**Note**: most of the reduction operations in this tutorial use a large pixel scale so that operations complete quickly. In your own application, set the scale and other parameter arguments as you wish.
###Code
def create_reduce_region_function(geometry,
reducer=ee.Reducer.mean(),
scale=1000,
crs='EPSG:4326',
bestEffort=True,
maxPixels=1e13,
tileScale=4):
"""Creates a region reduction function.
Creates a region reduction function intended to be used as the input function
to ee.ImageCollection.map() for reducing pixels intersecting a provided region
to a statistic for each image in a collection. See ee.Image.reduceRegion()
documentation for more details.
Args:
geometry:
An ee.Geometry that defines the region over which to reduce data.
reducer:
Optional; An ee.Reducer that defines the reduction method.
scale:
Optional; A number that defines the nominal scale in meters of the
projection to work in.
crs:
Optional; An ee.Projection or EPSG string ('EPSG:5070') that defines
the projection to work in.
bestEffort:
Optional; A Boolean indicator for whether to use a larger scale if the
geometry contains too many pixels at the given scale for the operation
to succeed.
maxPixels:
Optional; A number specifying the maximum number of pixels to reduce.
tileScale:
Optional; A number representing the scaling factor used to reduce
aggregation tile size; using a larger tileScale (e.g. 2 or 4) may enable
computations that run out of memory with the default.
Returns:
A function that accepts an ee.Image and reduces it by region, according to
the provided arguments.
"""
def reduce_region_function(img):
"""Applies the ee.Image.reduceRegion() method.
Args:
img:
An ee.Image to reduce to a statistic by region.
Returns:
An ee.Feature that contains properties representing the image region
reduction results per band and the image timestamp formatted as
milliseconds from Unix epoch (included to enable time series plotting).
"""
stat = img.reduceRegion(
reducer=reducer,
geometry=geometry,
scale=scale,
crs=crs,
bestEffort=bestEffort,
maxPixels=maxPixels,
tileScale=tileScale)
return ee.Feature(geometry, stat).set({'millis': img.date().millis()})
return reduce_region_function
###Output
_____no_output_____
###Markdown
FormattingThe result of the region reduction function above applied to an `ee.ImageCollection` produces an `ee.FeatureCollection`. This data needs to be transferred to the Python kernel, but serialized feature collections are large and awkward to deal with. This step defines a function to convert the feature collection to an `ee.Dictionary` where the keys are feature property names and values are corresponding lists of property values, which `pandas` can deal with handily.1. Extract the property values from the `ee.FeatureCollection` as a list of lists stored in an `ee.Dictionary` using `reduceColumns()`.2. Extract the list of lists from the dictionary.3. Add names to each list by converting to an `ee.Dictionary` where keys are property names and values are the corresponding value lists.The returned `ee.Dictionary` is essentially a table, where keys define columns and list elements define rows.
###Code
# Define a function to transfer feature properties to a dictionary.
def fc_to_dict(fc):
prop_names = fc.first().propertyNames()
prop_lists = fc.reduceColumns(
reducer=ee.Reducer.toList().repeat(prop_names.size()),
selectors=prop_names).get('list')
return ee.Dictionary.fromLists(prop_names, prop_lists)
###Output
_____no_output_____
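###Markdown
As an illustrative aside (the property names and values below are made up), the dictionary returned by `fc_to_dict(...).getInfo()` is column-oriented: each key maps to an equal-length list of values, which is exactly the shape `pd.DataFrame()` accepts.
###Code
# Hypothetical example of the column-oriented dictionary shape returned by
# fc_to_dict(...).getInfo(); the keys and values here are invented.
example_dict = {
    'millis': [946684800000, 946713600000],
    'pdsi': [-1.2, -0.8],
}
pd.DataFrame(example_dict)
###Output
_____no_output_____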
###Markdown
Drought severityIn this section we'll look at a time series of drought severity as a calendar heat map and a bar chart. Import data1. Load the gridded Palmer Drought Severity Index (PDSI) data as an `ee.ImageCollection`.2. Load the EPA Level-3 ecoregion boundaries as an `ee.FeatureCollection` and filter it to include only the Sierra Nevada region, which defines the area of interest (AOI).
###Code
pdsi = ee.ImageCollection('IDAHO_EPSCOR/PDSI')
aoi = ee.FeatureCollection('EPA/Ecoregions/2013/L3').filter(
ee.Filter.eq('na_l3name', 'Sierra Nevada')).geometry()
###Output
_____no_output_____
###Markdown
**Note**: the `aoi` defined above will be used throughout this tutorial. In your own application, redefine it for your own area of interest. Reduce data1. Create a region reduction function.2. Map the function over the `pdsi` image collection to reduce each image.3. Filter out any resulting features that have null computed values (occurs when all pixels in an AOI are masked).
###Code
reduce_pdsi = create_reduce_region_function(
geometry=aoi, reducer=ee.Reducer.mean(), scale=5000, crs='EPSG:3310')
pdsi_stat_fc = ee.FeatureCollection(pdsi.map(reduce_pdsi)).filter(
ee.Filter.notNull(pdsi.first().bandNames()))
###Output
_____no_output_____
###Markdown
---**STOP**: _Optional export_ _If your process is long-running_, you'll want to export the `pdsi_stat_fc` variable as an asset using a batch task. Wait until the task finishes, import the asset, and continue on. Please see the Developer Guide section on [exporting with the Python API](https://developers.google.com/earth-engine/python_install#exporting-data). Export to asset:
###Code
"""
task = ee.batch.Export.table.toAsset(
collection=pdsi_stat_fc,
description='pdsi_stat_fc export',
assetId='users/YOUR_USER_NAME/pdsi_stat_fc_ts_vis_with_altair')
task.start()
"""
###Output
_____no_output_____
###Markdown
Import the asset after the export completes:
###Code
"""
pdsi_stat_fc = ee.FeatureCollection('users/YOUR_USER_NAME/pdsi_stat_fc_ts_vis_with_altair')
"""
###Output
_____no_output_____
###Markdown
_\* Remove triple quote comment fence to run the above cells._ ---**CONTINUE**: Server to client transfer The `ee.FeatureCollection` needs to be converted to a dictionary and transferred to the Python kernel.1. Apply the `fc_to_dict` function to convert from `ee.FeatureCollection` to `ee.Dictionary`.2. Call `getInfo()` on the `ee.Dictionary` to transfer the data client-side.
###Code
pdsi_dict = fc_to_dict(pdsi_stat_fc).getInfo()
###Output
_____no_output_____
###Markdown
The result is a Python dictionary. Print a small part to see how it is formatted.
###Code
print(type(pdsi_dict), '\n')
for prop in pdsi_dict.keys():
print(prop + ':', pdsi_dict[prop][0:3] + ['...'])
###Output
_____no_output_____
###Markdown
Convert the Python dictionary to a pandas DataFrame.
###Code
pdsi_df = pd.DataFrame(pdsi_dict)
###Output
_____no_output_____
###Markdown
Preview the DataFrame and check the column data types.
###Code
display(pdsi_df)
print(pdsi_df.dtypes)
###Output
_____no_output_____
###Markdown
Add date columns Add date columns derived from the milliseconds from Unix epoch column. The pandas library provides functions and objects for timestamps and the DataFrame object allows for easy mutation.Define a function to add date variables to the DataFrame: year, month, day, and day of year (DOY).
###Code
# Function to add date variables to DataFrame.
def add_date_info(df):
df['Timestamp'] = pd.to_datetime(df['millis'], unit='ms')
df['Year'] = pd.DatetimeIndex(df['Timestamp']).year
df['Month'] = pd.DatetimeIndex(df['Timestamp']).month
df['Day'] = pd.DatetimeIndex(df['Timestamp']).day
df['DOY'] = pd.DatetimeIndex(df['Timestamp']).dayofyear
return df
###Output
_____no_output_____
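###Markdown
As a quick aside, the function can be sanity-checked on a synthetic one-row DataFrame (illustrative only; the millisecond value below corresponds to 2013-01-01 UTC).
###Code
# Illustrative check of add_date_info with a made-up millisecond timestamp.
demo_df = pd.DataFrame({'millis': [1356998400000]})  # 2013-01-01T00:00:00Z
add_date_info(demo_df)
###Output
_____no_output_____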
###Markdown
**Note**: the above function for adding date information to a DataFrame will be used throughout this tutorial. Apply the `add_date_info` function to the PDSI DataFrame to add date attribute columns, preview the results.
###Code
pdsi_df = add_date_info(pdsi_df)
pdsi_df.head(5)
###Output
_____no_output_____
###Markdown
Rename and drop columns Often it is desirable to rename columns and/or remove unnecessary columns. Do both here and preview the DataFrame.
###Code
pdsi_df = pdsi_df.rename(columns={
'pdsi': 'PDSI'
}).drop(columns=['millis', 'system:index'])
pdsi_df.head(5)
###Output
_____no_output_____
###Markdown
Check the data type of each column.
###Code
pdsi_df.dtypes
###Output
_____no_output_____
###Markdown
At this point the DataFrame is in good shape for charting with Altair. Calendar heatmap Chart PDSI data as a calendar heatmap. Set observation year as the x-axis variable, month as y-axis, and PDSI value as color.Note that Altair features a convenient [method for aggregating values within groups](https://altair-viz.github.io/user_guide/transform/aggregate.html) while encoding the chart (i.e., no need to create a new DataFrame). The mean aggregate transform is applied here because each month has three PDSI observations (year and month are the grouping factors).Also note that a tooltip has been added to the chart; hovering over cells reveals the values of the selected variables.
###Code
alt.Chart(pdsi_df).mark_rect().encode(
x='Year:O',
y='Month:O',
color=alt.Color(
'mean(PDSI):Q', scale=alt.Scale(scheme='redblue', domain=(-5, 5))),
tooltip=[
alt.Tooltip('Year:O', title='Year'),
alt.Tooltip('Month:O', title='Month'),
alt.Tooltip('mean(PDSI):Q', title='PDSI')
]).properties(width=600, height=300)
###Output
_____no_output_____
###Markdown
The calendar heat map is good for interpretation of relative intra- and inter-annual differences in PDSI. However, since the PDSI variable is represented by color, estimating absolute values and magnitude of difference is difficult. Bar chart Chart PDSI time series as a bar chart to more easily interpret absolute values and compare them over time. Here, the observation timestamp is represented on the x-axis and PDSI is represented by both the y-axis and color. Since each PDSI observation has a unique timestamp that can be plotted to the x-axis, there is no need to aggregate PDSI values as in the above chart. A tooltip is added to the chart; hover over the bars to reveal the values for each variable.
###Code
alt.Chart(pdsi_df).mark_bar(size=1).encode(
x='Timestamp:T',
y='PDSI:Q',
color=alt.Color(
'PDSI:Q', scale=alt.Scale(scheme='redblue', domain=(-5, 5))),
tooltip=[
alt.Tooltip('Timestamp:T', title='Date'),
alt.Tooltip('PDSI:Q', title='PDSI')
]).properties(width=600, height=300)
###Output
_____no_output_____
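###Markdown
As an aside, any Altair chart object can also be written to a standalone HTML file with its `save()` method; a minimal sketch, assuming the chart is first assigned to a variable (the filename is arbitrary).
###Code
# Illustrative: persist a chart as self-contained vega-embed HTML.
pdsi_bar = alt.Chart(pdsi_df).mark_bar(size=1).encode(
    x='Timestamp:T', y='PDSI:Q')
pdsi_bar.save('pdsi_bar.html')
###Output
_____no_output_____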
###Markdown
This temporal bar chart makes it easier to interpret and compare absolute values of PDSI over time, but relative intra- and inter-annual variability are arguably harder to interpret because the division of year and month is not as distinct as in the calendar heatmap above. Take note of the extended and severe period of drought from 2012 through 2016. In the next section, we'll look for a vegetation response to this event. Vegetation productivityNDVI is a proxy measure of photosynthetic capacity and is used in this tutorial to investigate vegetation response to the 2012-2016 drought identified in the PDSI bar chart above. MODIS provides an analysis-ready 16-day NDVI composite that is well suited for regional investigation of temporal vegetation dynamics. The following steps reduce and prepare this data for charting in the same manner as the PDSI data above; please refer to previous sections to review details. Import and reduce1. Load the MODIS NDVI data as an `ee.ImageCollection`. 2. Create a region reduction function. 3. Apply the function to all images in the time series. 4. Filter out features with null computed values.
###Code
ndvi = ee.ImageCollection('MODIS/006/MOD13A2').select('NDVI')
reduce_ndvi = create_reduce_region_function(
geometry=aoi, reducer=ee.Reducer.mean(), scale=1000, crs='EPSG:3310')
ndvi_stat_fc = ee.FeatureCollection(ndvi.map(reduce_ndvi)).filter(
ee.Filter.notNull(ndvi.first().bandNames()))
###Output
_____no_output_____
###Markdown
---**STOP**: _If your process is long-running_, you'll want to export the `ndvi_stat_fc` variable as an asset using a batch task. Wait until the task finishes, import the asset, and continue on.Please see the above **_Optional export_** section for more details.**CONTINUE**:--- Prepare DataFrame 1. Transfer data from the server to the client.2. Convert the Python dictionary to a pandas DataFrame.3. Preview the DataFrame and check data types.
###Code
ndvi_dict = fc_to_dict(ndvi_stat_fc).getInfo()
ndvi_df = pd.DataFrame(ndvi_dict)
display(ndvi_df)
print(ndvi_df.dtypes)
###Output
_____no_output_____
###Markdown
4. Remove the NDVI scaling.5. Add date attribute columns.6. Preview the DataFrame.
###Code
ndvi_df['NDVI'] = ndvi_df['NDVI'] / 10000
ndvi_df = add_date_info(ndvi_df)
ndvi_df.head(5)
###Output
_____no_output_____
###Markdown
These NDVI time series data are now ready for plotting. DOY line chart Make a day of year (DOY) line chart where each line represents a year of observations. This chart makes it possible to compare the same observation date among years. Use it to compare NDVI values for years during the drought and not.Day of year is represented on the x-axis and NDVI on the y-axis. Each line represents a year and is distinguished by color. Note that this plot includes a tooltip and has been made interactive so that the axes can be zoomed and panned.
###Code
highlight = alt.selection(
type='single', on='mouseover', fields=['Year'], nearest=True)
base = alt.Chart(ndvi_df).encode(
x=alt.X('DOY:Q', scale=alt.Scale(domain=[0, 353], clamp=True)),
y=alt.Y('NDVI:Q', scale=alt.Scale(domain=[0.1, 0.6])),
color=alt.Color('Year:O', scale=alt.Scale(scheme='magma')))
points = base.mark_circle().encode(
opacity=alt.value(0),
tooltip=[
alt.Tooltip('Year:O', title='Year'),
alt.Tooltip('DOY:Q', title='DOY'),
alt.Tooltip('NDVI:Q', title='NDVI')
]).add_selection(highlight)
lines = base.mark_line().encode(
size=alt.condition(~highlight, alt.value(1), alt.value(3)))
(points + lines).properties(width=600, height=350).interactive()
###Output
_____no_output_____
###Markdown
The first thing to note is that winter dates (when there is snow in the Sierra Nevada ecoregion) exhibit highly variable inter-annual NDVI, but spring, summer, and fall dates are more consistent. With regard to drought effects on vegetation, summer and fall dates are the most sensitive time. Zooming into observations for the summer/fall days (224-272), you'll notice that many years have a u-shaped pattern where NDVI values decrease and then rise. Another way to view these data is to plot the distribution of NDVI by DOY represented as an interquartile range envelope and median line. Here, these two charts are defined and then combined in the following snippet.1. Define a base chart.2. Define a line chart for median NDVI (note the use of aggregate median transform grouping by DOY).3. Define a band chart using `'iqr'` (interquartile range) to represent NDVI distribution grouping on DOY.4. Combine the line and band charts.
###Code
base = alt.Chart(ndvi_df).encode(
x=alt.X('DOY:Q', scale=alt.Scale(domain=(150, 340))))
line = base.mark_line().encode(
y=alt.Y('median(NDVI):Q', scale=alt.Scale(domain=(0.47, 0.53))))
band = base.mark_errorband(extent='iqr').encode(
y='NDVI:Q')
(line + band).properties(width=600, height=300).interactive()
###Output
_____no_output_____
###Markdown
The summary statistics for the summer/fall days (224-272) certainly show an NDVI reduction, but there is also variability; some years exhibit greater NDVI reduction than others, as suggested by the wide interquartile range during the middle of the summer. Assuming that NDVI reduction is due to water and heat limiting photosynthesis, we can hypothesize that during years of drought, photosynthesis (NDVI) will be lower than in non-drought years. We can investigate the relationship between photosynthesis (NDVI) and drought (PDSI) using a scatter plot and linear regression. Drought and productivity relationshipA scatterplot is a good way to visualize the relationship between two variables. Here, PDSI (drought indicator) will be plotted on the x-axis and NDVI (vegetation productivity) on the y-axis. To achieve this, both variables must exist in the same DataFrame. Each row will be an observation in time and columns will correspond to PDSI and NDVI values. Currently, PDSI and NDVI are in two different DataFrames and need to be merged. Prepare DataFramesBefore they can be merged, each variable must be reduced to a common temporal observation unit to define correspondence. There are a number of ways to do this and each will define the relationship between PDSI and NDVI differently. Here, our temporal unit will be an annual observation set where NDVI is reduced to the intra-annual minimum from DOY 224 to 272 and PDSI will be the mean from DOY 1 to 272. We are proposing that average drought severity for the first three quarters of a year is related to minimum summer NDVI for a given year. 1. Filter the NDVI DataFrame to observations that occur between DOY 224 and 272. 2. Reduce the DOY-filtered subset to intra-annual minimum NDVI.
###Code
ndvi_doy_range = [224, 272]
ndvi_df_sub = ndvi_df[(ndvi_df['DOY'] >= ndvi_doy_range[0])
& (ndvi_df['DOY'] <= ndvi_doy_range[1])]
ndvi_df_sub = ndvi_df_sub.groupby('Year').agg('min')
###Output
_____no_output_____
###Markdown
**Note**: in your own application you may find that a different DOY range is more suitable; change the `ndvi_doy_range` as needed. 3. Filter the PDSI DataFrame to observations that occur between DOY 1 and 272. 4. Reduce the values within a given year to the mean of the observations.
###Code
pdsi_doy_range = [1, 272]
pdsi_df_sub = pdsi_df[(pdsi_df['DOY'] >= pdsi_doy_range[0])
& (pdsi_df['DOY'] <= pdsi_doy_range[1])]
pdsi_df_sub = pdsi_df_sub.groupby('Year').agg('mean')
###Output
_____no_output_____
###Markdown
**Note**: in your own application you may find that a different DOY range is more suitable; change the `pdsi_doy_range` as needed. 5. Perform a join on 'Year' to combine the two reduced DataFrames. 6. Select only the columns of interest: 'Year', 'NDVI', 'PDSI'. 7. Preview the DataFrame.
###Code
ndvi_pdsi_df = pd.merge(
ndvi_df_sub, pdsi_df_sub, how='left', on='Year').reset_index()
ndvi_pdsi_df = ndvi_pdsi_df[['Year', 'NDVI', 'PDSI']]
ndvi_pdsi_df.head(5)
###Output
_____no_output_____
###Markdown
NDVI and PDSI are now included in the same DataFrame linked by Year. This format is suitable for determining a linear relationship and drawing a line of best fit through the data.Including a line of best fit can be a helpful visual aid. Here, a 1D polynomial is fit through the xy point cloud defined by corresponding NDVI and PDSI observations. The resulting fit is added to the DataFrame as a new column 'Fit'.8. Add a line of best fit between PDSI and NDVI by determining the linear relationship and predicting NDVI based on PDSI for each year.
###Code
ndvi_pdsi_df['Fit'] = np.poly1d(
np.polyfit(ndvi_pdsi_df['PDSI'], ndvi_pdsi_df['NDVI'], 1))(
ndvi_pdsi_df['PDSI'])
ndvi_pdsi_df.head(5)
###Output
_____no_output_____
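###Markdown
As a small aside, the fitted coefficients themselves are informative: `np.polyfit` returns the slope first and the intercept second, so the fit can be reported as an equation.
###Code
# Illustrative: report the linear fit from the same polyfit call as above.
slope, intercept = np.polyfit(ndvi_pdsi_df['PDSI'], ndvi_pdsi_df['NDVI'], 1)
print('NDVI = {:.4f} * PDSI + {:.4f}'.format(slope, intercept))
###Output
_____no_output_____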
###Markdown
Scatter plotThe DataFrame is ready for plotting. Since this chart is to include points and a line of best fit, two charts need to be created, one for the points and one for the line. The results are combined into the final plot.
###Code
base = alt.Chart(ndvi_pdsi_df).encode(
x=alt.X('PDSI:Q', scale=alt.Scale(domain=(-5, 5))))
points = base.mark_circle(size=60).encode(
y=alt.Y('NDVI:Q', scale=alt.Scale(domain=(0.4, 0.6))),
color=alt.Color('Year:O', scale=alt.Scale(scheme='magma')),
tooltip=[
alt.Tooltip('Year:O', title='Year'),
alt.Tooltip('PDSI:Q', title='PDSI'),
alt.Tooltip('NDVI:Q', title='NDVI')
])
fit = base.mark_line().encode(
y=alt.Y('Fit:Q'),
color=alt.value('#808080'))
(points + fit).properties(width=600, height=300).interactive()
###Output
_____no_output_____
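###Markdown
The visual trend can also be summarized with a single number; as an illustrative aside, NumPy's correlation matrix gives Pearson's r for the two columns.
###Code
# Illustrative: Pearson correlation between annual mean PDSI and minimum
# summer NDVI (off-diagonal entry of the 2x2 correlation matrix).
r = np.corrcoef(ndvi_pdsi_df['PDSI'], ndvi_pdsi_df['NDVI'])[0, 1]
print('Pearson r = {:.2f}'.format(r))
###Output
_____no_output_____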
###Markdown
As you can see, there seems to be some degree of positive correlation between PDSI and NDVI (i.e., as wetness increases, vegetation productivity increases; as wetness decreases, vegetation productivity decreases). Note that some of the greatest outliers are 2016, 2017, 2018 - the three years following recovery from the long drought. It is also important to note that there are many other factors that may influence the NDVI signal that are not being considered here. Patch-level vegetation mortalityAt a regional scale there appears to be a relationship between drought and vegetation productivity. This section will look more closely at effects of drought on vegetation at a patch level, with a specific focus on mortality. Here, a Landsat time series collection is created for the period 1984-present to provide greater temporal context for change at a relatively precise spatial resolution. Find a point of interestUse [aerial imagery](https://developers.google.com/earth-engine/datasets/catalog/USDA_NAIP_DOQQ) from the National Agriculture Imagery Program (NAIP) in an interactive [Folium](https://python-visualization.github.io/folium/) map to identify a location in the Sierra Nevada ecoregion that appears to have patches of dead trees.1. Run the following code block to render an interactive Folium map for a selected NAIP image.2. Zoom and pan around the image to identify a region of recently dead trees (standing silver snags with no fine branches or brown/grey snags with fine branches).3. Click the map to list the latitude and longitude for a patch of interest. Record these values for use in the following section (the example location used in the following section is presented as a yellow point).
###Code
# Define a method for displaying Earth Engine image tiles to folium map.
def add_ee_layer(self, ee_image_object, vis_params, name):
map_id_dict = ee.Image(ee_image_object).getMapId(vis_params)
folium.raster_layers.TileLayer(
tiles=map_id_dict['tile_fetcher'].url_format,
attr='Map Data © <a href="https://earthengine.google.com/">Google Earth Engine, USDA National Agriculture Imagery Program</a>',
name=name,
overlay=True,
control=True).add_to(self)
# Add an Earth Engine layer drawing method to folium.
folium.Map.add_ee_layer = add_ee_layer
# Import a NAIP image for the area and date of interest.
naip_img = ee.ImageCollection('USDA/NAIP/DOQQ').filterDate(
'2016-01-01',
'2017-01-01').filterBounds(ee.Geometry.Point([-118.6407, 35.9665])).first()
# Display the NAIP image to the folium map.
m = folium.Map(location=[35.9665, -118.6407], tiles='Stamen Terrain', zoom_start=16, height=500)
m.add_ee_layer(naip_img, None, 'NAIP image, 2016')
# Add the point of interest to the map.
folium.Circle(
radius=15,
location=[35.9665, -118.6407],
color='yellow',
fill=False,
).add_to(m)
# Add the AOI to the map.
folium.GeoJson(
aoi.getInfo(),
name='geojson',
style_function=lambda x: {'fillColor': '#00000000', 'color': '#000000'},
).add_to(m)
# Add a lat lon popup.
folium.LatLngPopup().add_to(m)
# Display the map.
display(m)
###Output
_____no_output_____
###Markdown
Prepare Landsat collectionLandsat surface reflectance data need to be prepared before being reduced. The steps below will organize data from multiple sensors into congruent collections where band names are consistent, clouds and cloud shadows have been masked out, and the normalized burn ratio (NBR) transformation is calculated and returned as the image representative (NBR is a good indicator of forest disturbance). Finally, all sensor collections will be merged into a single collection and annual composites calculated based on mean annual NBR using a join. 1. Define Landsat observation date window inputs based on the NDVI curve plotted previously and set latitude and longitude variables from the map above.
###Code
start_day = 224
end_day = 272
latitude = 35.9665
longitude = -118.6407
###Output
_____no_output_____
###Markdown
**Note**: in your own application it may be necessary to change these values. 2. Prepare a Landsat surface reflectance collection 1984-present. Those unfamiliar with Landsat might find the following acronym definitions and links helpful. - [OLI](https://www.usgs.gov/land-resources/nli/landsat/landsat-8?qt-science_support_page_related_con=0#qt-science_support_page_related_con) (Landsat's Operational Land Imager sensor) - [ETM+](https://www.usgs.gov/land-resources/nli/landsat/landsat-7?qt-science_support_page_related_con=0#qt-science_support_page_related_con) (Landsat's Enhanced Thematic Mapper Plus sensor) - [TM](https://www.usgs.gov/land-resources/nli/landsat/landsat-5?qt-science_support_page_related_con=0#qt-science_support_page_related_con) (Landsat's Thematic Mapper sensor) - [CFMask](https://www.usgs.gov/land-resources/nli/landsat/cfmask-algorithm) (Landsat USGS surface reflectance mask based on the CFMask algorithm) - [NBR](https://www.usgs.gov/land-resources/nli/landsat/landsat-normalized-burn-ratio#:~:text=NBR%20is%20used%20to%20identify,SWIR%20values%20in%20traditional%20fashion.&text=In%20Landsat%204%2D7%2C%20NBR,Band%205%20%2B%20Band%207). (Normalized Burn Ratio: a spectral vegetation index) - Understanding [Earth Engine joins](https://developers.google.com/earth-engine/joins_intro)
###Code
# Make lat. and long. vars an `ee.Geometry.Point`.
point = ee.Geometry.Point([longitude, latitude])
# Define a function to get and rename bands of interest from OLI.
def rename_oli(img):
return (img.select(
ee.List(['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'pixel_qa']),
ee.List(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa'])))
# Define a function to get and rename bands of interest from ETM+.
def rename_etm(img):
return (img.select(
ee.List(['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa']),
ee.List(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa'])))
# Define a function to mask out clouds and cloud shadows.
def cfmask(img):
  cloud_shadow_bit_mask = 1 << 3
  cloud_bit_mask = 1 << 5
  qa = img.select('pixel_qa')
  mask = qa.bitwiseAnd(cloud_shadow_bit_mask).eq(0).And(
      qa.bitwiseAnd(cloud_bit_mask).eq(0))
  return img.updateMask(mask)
# Define a function to add year as an image property.
def set_year(img):
year = ee.Image(img).date().get('year')
return img.set('Year', year)
# Define a function to calculate NBR.
def calc_nbr(img):
return img.normalizedDifference(ee.List(['NIR', 'SWIR2'])).rename('NBR')
# Define a function to prepare OLI images.
def prep_oli(img):
orig = img
img = rename_oli(img)
img = cfmask(img)
img = calc_nbr(img)
img = img.copyProperties(orig, orig.propertyNames())
return set_year(img)
# Define a function to prepare TM/ETM+ images.
def prep_etm(img):
orig = img
img = rename_etm(img)
img = cfmask(img)
img = calc_nbr(img)
img = img.copyProperties(orig, orig.propertyNames())
return set_year(img)
# Import image collections for each Landsat sensor (surface reflectance).
tm_col = ee.ImageCollection('LANDSAT/LT05/C01/T1_SR')
etm_col = ee.ImageCollection('LANDSAT/LE07/C01/T1_SR')
oli_col = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
# Filter collections and prepare them for merging.
oli_col = oli_col.filterBounds(point).filter(
ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_oli)
etm_col = etm_col.filterBounds(point).filter(
ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_etm)
tm_col = tm_col.filterBounds(point).filter(
ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_etm)
# Merge the collections.
landsat_col = oli_col.merge(etm_col).merge(tm_col)
# Get a distinct year collection.
distinct_year_col = landsat_col.distinct('Year')
# Define a filter that identifies which images from the complete collection
# match the year from the distinct year collection.
join_filter = ee.Filter.equals(leftField='Year', rightField='Year')
# Define a join.
join = ee.Join.saveAll('year_matches')
# Apply the join and convert the resulting FeatureCollection to an
# ImageCollection.
join_col = ee.ImageCollection(
join.apply(distinct_year_col, landsat_col, join_filter))
# Define a function to apply mean reduction among matching year collections.
def reduce_by_join(img):
year_col = ee.ImageCollection.fromImages(ee.Image(img).get('year_matches'))
return year_col.reduce(ee.Reducer.mean()).rename('NBR').set(
'system:time_start',
ee.Image(img).date().update(month=8, day=1).millis())
# Apply the `reduce_by_join` function to the list of annual images in the
# properties of the join collection.
landsat_col = join_col.map(reduce_by_join)
###Output
_____no_output_____
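###Markdown
As an optional sanity check (this triggers a round trip to the Earth Engine servers), the size of the joined collection should equal the number of distinct years.
###Code
# Illustrative: count the annual composites in the joined collection.
print(landsat_col.size().getInfo())
###Output
_____no_output_____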
###Markdown
The result of the above code block is an image collection with as many images as there are years present in the merged Landsat collection. Each image represents the annual mean NBR constrained to observations within the given date window. Prepare DataFrame1. Create a region reduction function; use `ee.Reducer.first()` as the reducer since no spatial aggregation is needed (we are interested in the single pixel that intersects the point). Set the region as the geometry defined by the lat. and long. coordinates identified in the above map.2. Apply the function to all images in the time series.3. Filter out features with null computed values.
###Code
reduce_landsat = create_reduce_region_function(
geometry=point, reducer=ee.Reducer.first(), scale=30, crs='EPSG:3310')
nbr_stat_fc = ee.FeatureCollection(landsat_col.map(reduce_landsat)).filter(
ee.Filter.notNull(landsat_col.first().bandNames()))
###Output
_____no_output_____
###Markdown
4. Transfer data from the server to the client.*Note: if the process times out, you'll need to export/import the `nbr_stat_fc` feature collection as described in the **Optional export** section*.5. Convert the Python dictionary to a pandas DataFrame.6. Preview the DataFrame and check data types.
###Code
nbr_dict = fc_to_dict(nbr_stat_fc).getInfo()
nbr_df = pd.DataFrame(nbr_dict)
display(nbr_df)
print(nbr_df.dtypes)
###Output
_____no_output_____
###Markdown
7. Add date attribute columns.8. Preview the DataFrame.
###Code
nbr_df = add_date_info(nbr_df)
nbr_df.head(5)
###Output
_____no_output_____
###Markdown
Line chartDisplay the Landsat NBR time series for the point of interest as a line plot.
###Code
alt.Chart(nbr_df).mark_line().encode(
x=alt.X('Timestamp:T', title='Date'),
y='NBR:Q',
tooltip=[
alt.Tooltip('Timestamp:T', title='Date'),
alt.Tooltip('NBR:Q')
]).properties(width=600, height=300).interactive()
###Output
_____no_output_____
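###Markdown
As a rough, illustrative aside, the severity of the drop can be quantified by comparing the post-2014 minimum against the mean and standard deviation of the pre-drought record (all names below come from the DataFrame built above).
###Code
# Illustrative: how far the lowest post-2014 NBR falls below the 1984-2014
# mean, in standard deviations of the pre-drought record.
baseline = nbr_df[nbr_df['Year'] < 2015]['NBR']
post_min = nbr_df[nbr_df['Year'] >= 2015]['NBR'].min()
z = (post_min - baseline.mean()) / baseline.std()
print('Post-2014 minimum NBR is {:.1f} standard deviations below the '
      'pre-drought mean'.format(abs(z)))
###Output
_____no_output_____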
###Markdown
As you can see from the above time series of NBR observations, a dramatic decrease in NBR began in 2015, shortly after the severe and extended drought began. The decline continued through 2017, when a minor recovery began. Within the context of the entire time series, it is apparent that the decline is outside of normal inter-annual variability and that the reduction in NBR for this site is quite severe. The lack of major recovery response in NBR in 2017-19 (time of writing) indicates that the event was not ephemeral; the loss of vegetation will have a lasting impact on this site. The corresponding onset of drought and reduction in NBR provides further evidence that there is a relationship between drought and vegetation response in the Sierra Nevada ecoregion. Past and future climateThe previous data visualizations suggest there is a relationship between drought and vegetation stress and mortality in the Sierra Nevada ecoregion. This section will look at how climate is projected to change in the future, which can give us a sense for what to expect with regard to drought conditions and allow us to speculate about their impact on vegetation. We'll look at historical and projected temperature and precipitation. Projected data are represented by NEX-DCP30, and historical observations by PRISM. Future climateNEX-DCP30 data contain 33 climate models projected to the year 2100 under several representative concentration pathway (RCP) greenhouse gas scenarios. Here, we'll use the median of all models for RCP 8.5 (the worst-case scenario) to look at potential future temperature and precipitation. Import and prepare collection1. Filter the collection by date and scenario. 2. Calculate 'mean' temperature from the median min and max among the 33 models.
###Code
dcp_col = (ee.ImageCollection('NASA/NEX-DCP30_ENSEMBLE_STATS')
.select(['tasmax_median', 'tasmin_median', 'pr_median'])
.filter(
ee.Filter.And(ee.Filter.eq('scenario', 'rcp85'),
ee.Filter.date('2019-01-01', '2070-01-01'))))
def calc_mean_temp(img):
return (img.select('tasmax_median')
.add(img.select('tasmin_median'))
.divide(ee.Image.constant(2.0))
.addBands(img.select('pr_median'))
.rename(['Temp-mean', 'Precip-rate'])
.copyProperties(img, img.propertyNames()))
dcp_col = dcp_col.map(calc_mean_temp)
###Output
_____no_output_____
###Markdown
Prepare DataFrame1. Create a region reduction function.2. Apply the function to all images in the time series.3. Filter out features with null computed values.
###Code
reduce_dcp30 = create_reduce_region_function(
geometry=point, reducer=ee.Reducer.first(), scale=5000, crs='EPSG:3310')
dcp_stat_fc = ee.FeatureCollection(dcp_col.map(reduce_dcp30)).filter(
ee.Filter.notNull(dcp_col.first().bandNames()))
###Output
_____no_output_____
###Markdown
4. Transfer data from the server to the client. *Note: if the process times out, you'll need to export/import the `dcp_stat_fc` feature collection as described in the **Optional export** section*.5. Convert the Python dictionary to a pandas DataFrame.6. Preview the DataFrame and check the data types.
###Code
dcp_dict = fc_to_dict(dcp_stat_fc).getInfo()
dcp_df = pd.DataFrame(dcp_dict)
display(dcp_df)
print(dcp_df.dtypes)
###Output
_____no_output_____
###Markdown
7. Add date attribute columns.8. Preview the DataFrame.
###Code
dcp_df = add_date_info(dcp_df)
dcp_df.head(5)
###Output
_____no_output_____
###Markdown
9. Convert precipitation rate to mm. 10. Convert Kelvin to Celsius. 11. Add the model name as a column. 12. Remove the 'Precip-rate' column.
###Code
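# Aside (added note): NEX-DCP30 precipitation is a rate in kg/m^2/s
# (equivalent to mm/s), so multiplying by 86400 s/day and ~30 days/month
# approximates mm per month; temperature bands are in Kelvin.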
dcp_df['Precip-mm'] = dcp_df['Precip-rate'] * 86400 * 30
dcp_df['Temp-mean'] = dcp_df['Temp-mean'] - 273.15
dcp_df['Model'] = 'NEX-DCP30'
dcp_df = dcp_df.drop(columns=['Precip-rate'])
dcp_df.head(5)
###Output
_____no_output_____
###Markdown
Past climatePRISM data are climate datasets for the conterminous United States. Grid cells are interpolated based on station data assimilated from many networks across the country. The datasets used here are monthly averages for precipitation and temperature. They provide a record of historical climate. Reduce collection and prepare DataFrame1. Import the collection and filter by date.2. Reduce the collection images by region and filter null computed values.3. Convert the feature collection to a dictionary and transfer it client-side.*Note: if the process times out, you'll need to export/import the `prism_stat_fc` feature collection as described in the **Optional export** section*.4. Convert the dictionary to a DataFrame.5. Preview the DataFrame.
###Code
prism_col = (ee.ImageCollection('OREGONSTATE/PRISM/AN81m')
.select(['ppt', 'tmean'])
.filter(ee.Filter.date('1979-01-01', '2019-12-31')))
reduce_prism = create_reduce_region_function(
geometry=point, reducer=ee.Reducer.first(), scale=5000, crs='EPSG:3310')
prism_stat_fc = (ee.FeatureCollection(prism_col.map(reduce_prism))
.filter(ee.Filter.notNull(prism_col.first().bandNames())))
prism_dict = fc_to_dict(prism_stat_fc).getInfo()
prism_df = pd.DataFrame(prism_dict)
display(prism_df)
print(prism_df.dtypes)
###Output
_____no_output_____
###Markdown
6. Add date attribute columns.7. Add model name.8. Rename columns to be consistent with the NEX-DCP30 DataFrame.9. Preview the DataFrame.
###Code
prism_df = add_date_info(prism_df)
prism_df['Model'] = 'PRISM'
prism_df = prism_df.rename(columns={'ppt': 'Precip-mm', 'tmean': 'Temp-mean'})
prism_df.head(5)
###Output
_____no_output_____
###Markdown
Combine DataFrames At this point the PRISM and NEX-DCP30 DataFrames have the same columns, the same units, and are distinguished by unique entries in the 'Model' column. Use the `concat` function to concatenate these DataFrames into a single DataFrame for plotting together in the same chart.
###Code
climate_df = pd.concat([prism_df, dcp_df], sort=True)
climate_df
###Output
_____no_output_____
###Markdown
ChartsChart the past and future precipitation and temperature together to get a sense for where climate has been and where it is projected to go under RCP 8.5. Precipitation
###Code
base = alt.Chart(climate_df).encode(
x='Year:O',
color='Model')
line = base.mark_line().encode(
y=alt.Y('median(Precip-mm):Q', title='Precipitation (mm/month)'))
band = base.mark_errorband(extent='iqr').encode(
y=alt.Y('Precip-mm:Q', title='Precipitation (mm/month)'))
(band + line).properties(width=600, height=300)
###Output
_____no_output_____
###Markdown
Temperature
###Code
line = alt.Chart(climate_df).mark_line().encode(
x='Year:O',
y='median(Temp-mean):Q',
color='Model')
band = alt.Chart(climate_df).mark_errorband(extent='iqr').encode(
x='Year:O',
y=alt.Y('Temp-mean:Q', title='Temperature (°C)'), color='Model')
(band + line).properties(width=600, height=300)
###Output
_____no_output_____
Model Definition and Training.ipynb
###Markdown
MODEL DEFINITION, TRAINING AND DEPLOYMENT NOTEBOOK
###Code
#Install PySpark
!pip install pyspark==2.4.5
###Output
Requirement already satisfied: pyspark==2.4.5 in /opt/conda/envs/Python36/lib/python3.6/site-packages (2.4.5)
Requirement already satisfied: py4j==0.10.7 in /opt/conda/envs/Python36/lib/python3.6/site-packages (from pyspark==2.4.5) (0.10.7)
###Markdown
Now let's import PySpark modules.
###Code
#Import PySpark libraries
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
###Output
_____no_output_____
###Markdown
Let's create Spark instance.
###Code
#Create Spark instance
sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]"))
spark = SparkSession \
.builder \
.getOrCreate()
###Output
_____no_output_____
###Markdown
Now we read the final parquet file into Spark and create a dataframe instance.
###Code
#Read parquet file as a Spark dataframe
df = spark.read.parquet('df_final.parquet')
df.createOrReplaceTempView('sensor_data')
#Display statistical description of the dataframe
df.describe().show()
###Output
+-------+------------------+--------------------+------------------+--------------------+--------------------+
|summary| humidity| smoke| temp| light_final| motion_final|
+-------+------------------+--------------------+------------------+--------------------+--------------------+
| count| 187451| 187451| 187451| 187451| 187451|
| mean| 50.81407674538176|0.022288350217858362|22.279969165276523|0.016009517153816197| 7.68200756464356E-4|
| stddev|1.8889262809920346|0.001720104166737...|0.4819022291417346| 0.12551213707790115|0.027705860735184988|
| min| 45.1|0.019416273090454256| 21.0| 0| 0|
| max| 63.3| 0.02942197406197568| 24.1| 1| 1|
+-------+------------------+--------------------+------------------+--------------------+--------------------+
###Markdown
Let's check if the dataframe is the right one by printing it.
###Code
#Display first 20 records of the dataframe
df.show()
###Output
+--------+--------------------+----+-----------+------------+
|humidity| smoke|temp|light_final|motion_final|
+--------+--------------------+----+-----------+------------+
| 51.0| 0.02041127012241292|22.7| 0| 0|
| 50.9| 0.02047512557617824|22.6| 0| 0|
| 50.9|0.020447620810233658|22.6| 0| 0|
| 50.9|0.020475166204362245|22.6| 0| 0|
| 50.9| 0.02045681960706413|22.6| 0| 0|
| 50.9| 0.02042485815208522|22.6| 0| 0|
| 50.9|0.020461237669931027|22.6| 0| 0|
| 50.9|0.020438716650667384|22.6| 0| 0|
| 50.9|0.020475033620232192|22.6| 0| 0|
| 50.9| 0.02042485815208522|22.6| 0| 0|
| 50.9| 0.02041184473336307|22.6| 0| 0|
| 50.8| 0.0203978759436991|22.6| 0| 0|
| 50.9| 0.02042943583639171|22.6| 0| 0|
| 50.9|0.020316591192420567|22.6| 0| 0|
| 50.9| 0.02036609998281239|22.6| 0| 0|
| 50.9| 0.02042485815208522|22.6| 0| 0|
| 50.9|0.020276445922912403|22.6| 0| 0|
| 50.9|0.020470443763956046|22.7| 0| 0|
| 50.9| 0.02045691146558556|22.7| 0| 0|
| 50.9|0.020475033620232192|22.6| 0| 0|
+--------+--------------------+----+-----------+------------+
only showing top 20 rows
###Markdown
From the graphs in the **Data Exploration Notebook** we observed that records where motion is detected are very sparse. This is an issue commonly referred to as *data imbalance*. Left unaddressed, the ML model will simply learn to always output 0, since that alone still yields 95%+ accuracy. We do not want that, so we *balance* the data by **down-sampling the majority class**. We start off by getting details about the records having 1 as motion_final.
###Code
#Filter the dataframe into a new dataframe df_1 which contains only the records where motion_final is 1
df_1 = df.where((df.motion_final == 1))
#Display statistical description of the new dataframe df_1
df_1.describe().show()
###Output
+-------+------------------+--------------------+------------------+-------------------+------------+
|summary| humidity| smoke| temp| light_final|motion_final|
+-------+------------------+--------------------+------------------+-------------------+------------+
| count| 144| 144| 144| 144| 144|
| mean| 50.32569444444445|0.022065659524688867|22.278472222222213|0.04861111111111111| 1.0|
| stddev|1.7420647875910713| 0.00184460745334887| 0.391547618667941|0.21580427259784288| 0.0|
| min| 46.1| 0.01988092642782673| 21.6| 0| 1|
| max| 54.6|0.027508421103950638| 23.4| 1| 1|
+-------+------------------+--------------------+------------------+-------------------+------------+
###Markdown
We observe that 144 records have 1 as motion_final. Hence, we need to sample the records that have 0 as motion_final such that the sample contains roughly 144 records as well. We accomplish this by using a sampling rate of 0.0008 on the data where motion_final is 0.
###Code
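#Aside: about 187,451 - 144 = 187,307 records have motion_final equal to 0,
#so a fraction of 0.0008 yields roughly 0.0008 * 187,307 = 150 of them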
#Filter the main dataframe again into a new dataframe df_2 which contains only the records where motion_final is 0 and randomly sample only 0.08% of it to roughly obtain 150 records
df_2 = df.where((df.motion_final == 0)).sample(False, 0.0008)
df_2.describe().show()
###Output
+-------+-----------------+--------------------+------------------+-------------------+------------+
|summary| humidity| smoke| temp| light_final|motion_final|
+-------+-----------------+--------------------+------------------+-------------------+------------+
| count| 160| 160| 160| 160| 160|
| mean| 50.845625|0.022506956236353994|22.285000000000004| 0.00625| 0.0|
| stddev|1.897825530780694|0.001751438845133...| 0.450045420489027|0.07905694150420947| 0.0|
| min| 46.2|0.020041577054495552| 21.2| 0| 0|
| max| 54.6| 0.02749299762823611| 23.9| 1| 0|
+-------+-----------------+--------------------+------------------+-------------------+------------+
###Markdown
Now that we have both processed dataframes, we can go ahead and **merge** them.
###Code
#Merge df_1 and df_2 to create a new dataframe df_final
df_final = df_1.unionAll(df_2)
###Output
_____no_output_____
###Markdown
Let's check the statistics of the merged dataframe.
###Code
#Display the statistical description of the merged dataframe
df_final.describe().show()
#Display first 20 records of the merged dataframe
df_final.show()
###Output
+--------+--------------------+----+-----------+------------+
|humidity| smoke|temp|light_final|motion_final|
+--------+--------------------+----+-----------+------------+
| 51.8|0.019998137876408518|22.2| 0| 1|
| 51.8|0.020032820812921674|22.3| 0| 1|
| 51.9|0.020020009199106837|22.3| 0| 1|
| 52.0| 0.01999377048749705|22.3| 0| 1|
| 52.0| 0.0200546885263529|22.3| 0| 1|
| 51.9| 0.02005924556696428|22.2| 0| 1|
| 51.9|0.020024037643655297|22.2| 0| 1|
| 51.1|0.020063629996772667|22.2| 0| 1|
| 50.6| 0.02004151412273946|22.2| 0| 1|
| 49.5|0.020024037643655297|21.8| 0| 1|
| 50.0|0.020059071769776702|21.8| 0| 1|
| 50.1| 0.02010314137587005|21.8| 0| 1|
| 50.1|0.020045893936615555|21.8| 0| 1|
| 51.0| 0.02004151412273946|21.8| 0| 1|
| 52.6|0.020024037643655297|21.8| 0| 1|
| 52.6| 0.02003726127961487|21.9| 0| 1|
| 52.6|0.020041577054495552|21.9| 0| 1|
| 51.5|0.020032820812921674|21.9| 0| 1|
| 51.5| 0.01988092642782673|21.9| 0| 1|
| 51.7| 0.02004151412273946|21.9| 0| 1|
+--------+--------------------+----+-----------+------------+
only showing top 20 rows
###Markdown
Since the dataframes were merged vertically, all records of one class precede the other, which can hurt training performance. Hence we **shuffle** the dataframe to restore data diversity.
###Code
#Import module which enables random shuffling of Spark dataframe
from pyspark.sql.functions import rand
#Randomly shuffle the records of the dataframe and name it df
df = df_final.orderBy(rand())
df.show()
###Output
+--------+--------------------+----+-----------+------------+
|humidity| smoke|temp|light_final|motion_final|
+--------+--------------------+----+-----------+------------+
| 51.2|0.024295005792873352|22.4| 0| 0|
| 46.5| 0.02106923987802722|23.9| 0| 0|
| 46.3|0.021209358505977532|23.7| 0| 0|
| 52.6|0.020543606849059416|22.8| 0| 1|
| 52.3|0.024756113729318914|21.6| 0| 1|
| 52.8| 0.02388495395368051|21.9| 0| 0|
| 51.8|0.019998137876408518|22.2| 0| 1|
| 52.1|0.020534427566983336|22.5| 0| 0|
| 47.0|0.021828032423057725|23.2| 0| 0|
| 52.2| 0.02352238795170069|21.9| 0| 0|
| 52.2| 0.02481460282330909|21.7| 0| 1|
| 51.5|0.020316591192420567|22.7| 0| 0|
| 50.4|0.024177195138253855|21.8| 0| 1|
| 47.9| 0.02150447792794127|21.9| 0| 1|
| 50.1|0.020045893936615555|21.8| 0| 1|
| 52.4|0.023848741909302598|21.8| 0| 0|
| 48.3|0.021011771023211667|21.9| 0| 1|
| 49.7|0.020289762204606602|23.1| 0| 0|
| 48.9| 0.02152947900919568|23.4| 0| 0|
| 46.5|0.022314007967695275|23.1| 0| 0|
+--------+--------------------+----+-----------+------------+
only showing top 20 rows
###Markdown
Perfect! Now that the dataframe is ready, we can proceed to feed it into the ML models. But first we have to define them: 1. Classifiers (Gradient Boosted Trees, Logistic Regression) 2. Deep learning model. Let's go ahead and define the **pipelines** for the classification models *with* and *without* StandardScaler to compare their performance.
###Code
#Import VectorAssembler module
from pyspark.ml.feature import VectorAssembler
#Define an instance of VectorAssembler
vectorAssembler = VectorAssembler(inputCols = ["humidity","smoke","temp","light_final"], outputCol = "features")
#Import StandardScaler module
from pyspark.ml.feature import StandardScaler
#Define an instance of StandardScaler
scaler = StandardScaler(inputCol="features", outputCol="features_final", withStd=True, withMean=False)
#Import ML classification modules
from pyspark.ml.classification import GBTClassifier, LogisticRegression
#Create instances of the two classification modules which are used with StandardScaler
gbt_classifier_wscaler = GBTClassifier(labelCol = "motion_final", featuresCol = "features_final", maxIter = 10)
lr_classifier_wscaler = LogisticRegression(labelCol = "motion_final", featuresCol = "features_final", maxIter=10, regParam=0.3, elasticNetParam=0.8)
#Create instances of the two classification modules which do not use StandardScaler
gbt_classifier_woscaler = GBTClassifier(labelCol = "motion_final", featuresCol = "features", maxIter = 10)
lr_classifier_woscaler = LogisticRegression(labelCol = "motion_final", featuresCol = "features", maxIter=10, regParam=0.3, elasticNetParam=0.8)
#Import Pipeline module
from pyspark.ml import Pipeline
#Define pipelines for the two classification modules with StandardScaler
pipeline_gbt_wscaler = Pipeline(stages=[vectorAssembler,scaler, gbt_classifier_wscaler])
pipeline_lr_wscaler = Pipeline(stages=[vectorAssembler,scaler, lr_classifier_wscaler])
#Define pipelines for the two classification modules without StandardScaler
pipeline_gbt_woscaler = Pipeline(stages=[vectorAssembler, gbt_classifier_woscaler])
pipeline_lr_woscaler = Pipeline(stages=[vectorAssembler, lr_classifier_woscaler])
###Output
_____no_output_____
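###Markdown
An aside before fitting: the cells below train and evaluate on the same data, which tends to overstate accuracy. A held-out split is the usual safeguard; a minimal sketch using Spark's `randomSplit` (illustrative only, not applied in the rest of this notebook).
###Code
#Illustrative only: hold out 20% of the records for unbiased evaluation
train_df, test_df = df.randomSplit([0.8, 0.2], seed=42)
print(train_df.count(), test_df.count())
###Output
_____no_output_____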
###Markdown
Now that the pipelines are defined, we can go ahead and fit the dataframe into the pipelines and train the models.
###Code
#Fit the dataframe to the defined pipelines
model_gbt_wscaler = pipeline_gbt_wscaler.fit(df)
model_gbt_woscaler = pipeline_gbt_woscaler.fit(df)
model_lr_wscaler = pipeline_lr_wscaler.fit(df)
model_lr_woscaler = pipeline_lr_woscaler.fit(df)
#Transform the dataframes by predicting motion_light using the fitted classification modules
prediction_gbt_wscaler = model_gbt_wscaler.transform(df)
prediction_gbt_woscaler = model_gbt_woscaler.transform(df)
prediction_lr_wscaler = model_lr_wscaler.transform(df)
prediction_lr_woscaler = model_lr_woscaler.transform(df)
#Display the dataframe which predicted the output using GBT with StandardScaler
prediction_gbt_wscaler.show()
#Display the dataframe which predicted the output using GBT without StandardScaler
prediction_gbt_woscaler.show()
#Display the dataframe which predicted the output using LogisticRegression with StandardScaler
prediction_lr_wscaler.show()
#Display the dataframe which predicted the output using LogisticRegression without StandardScaler
prediction_lr_woscaler.show()
###Output
+--------+--------------------+----+-----------+------------+--------------------+--------------------+--------------------+----------+
|humidity| smoke|temp|light_final|motion_final| features| rawPrediction| probability|prediction|
+--------+--------------------+----+-----------+------------+--------------------+--------------------+--------------------+----------+
| 51.2|0.024295005792873352|22.4| 0| 0|[51.2,0.024295005...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 46.5| 0.02106923987802722|23.9| 0| 0|[46.5,0.021069239...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 46.3|0.021209358505977532|23.7| 0| 0|[46.3,0.021209358...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 52.6|0.020543606849059416|22.8| 0| 1|[52.6,0.020543606...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 52.3|0.024756113729318914|21.6| 0| 1|[52.3,0.024756113...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 52.8| 0.02388495395368051|21.9| 0| 0|[52.8,0.023884953...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 51.8|0.019998137876408518|22.2| 0| 1|[51.8,0.019998137...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 52.1|0.020534427566983336|22.5| 0| 0|[52.1,0.020534427...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 47.0|0.021828032423057725|23.2| 0| 0|[47.0,0.021828032...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 52.2| 0.02352238795170069|21.9| 0| 0|[52.2,0.023522387...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 52.2| 0.02481460282330909|21.7| 0| 1|[52.2,0.024814602...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 51.5|0.020316591192420567|22.7| 0| 0|[51.5,0.020316591...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 50.4|0.024177195138253855|21.8| 0| 1|[50.4,0.024177195...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 47.9| 0.02150447792794127|21.9| 0| 1|[47.9,0.021504477...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 50.1|0.020045893936615555|21.8| 0| 1|[50.1,0.020045893...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 52.4|0.023848741909302598|21.8| 0| 0|[52.4,0.023848741...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 48.3|0.021011771023211667|21.9| 0| 1|[48.3,0.021011771...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 49.7|0.020289762204606602|23.1| 0| 0|[49.7,0.020289762...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 48.9| 0.02152947900919568|23.4| 0| 0|[48.9,0.021529479...|[0.10536051565782...|[0.52631578947368...| 0.0|
| 46.5|0.022314007967695275|23.1| 0| 0|[46.5,0.022314007...|[0.10536051565782...|[0.52631578947368...| 0.0|
+--------+--------------------+----+-----------+------------+--------------------+--------------------+--------------------+----------+
only showing top 20 rows
###Markdown
We can observe the *prediction* column in the predicted dataframes, containing the values predicted by the models. Now we can go ahead and **evaluate** the performance of the models.
###Code
#Import module used for model evaluation
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
#Create instance of the evaluator which evaluates the models based on accuracy
binEval = MulticlassClassificationEvaluator().setMetricName("accuracy").setPredictionCol("prediction").setLabelCol("motion_final")
#Use the created instance to evaluate all the models
gbt_acc_wscaler = binEval.evaluate(prediction_gbt_wscaler)
gbt_acc_woscaler = binEval.evaluate(prediction_gbt_woscaler)
lr_acc_wscaler = binEval.evaluate(prediction_lr_wscaler)
lr_acc_woscaler = binEval.evaluate(prediction_lr_woscaler)
###Output
_____no_output_____
###Markdown
Now we can move on to the *deep-learning model* using **keras** modules.
###Code
#Import necessary modules required to implement a DNN
import keras
from keras.models import Sequential
from keras.layers import Dense,Dropout
###Output
Using TensorFlow backend.
###Markdown
With the modules imported, we now define the neural-network models for both *scaled* and *unscaled* data.
###Code
#Construct a neural network model that uses scaled data
model_nn_wscaler = Sequential()
model_nn_wscaler.add(Dense(32, kernel_initializer='uniform', activation='relu', input_dim=4))
#model_nn.add(Dropout(0.2))
model_nn_wscaler.add(Dense(64, kernel_initializer='uniform', activation='relu'))
#output layer with 1 output neuron which will predict 1 or 0
model_nn_wscaler.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
#Construct a neural network model that uses unscaled data
model_nn_woscaler = Sequential()
model_nn_woscaler.add(Dense(32, kernel_initializer='uniform', activation='relu', input_dim=4))
#model_nn.add(Dropout(0.2))
model_nn_woscaler.add(Dense(64, kernel_initializer='uniform', activation='relu'))
#output layer with 1 output neuron which will predict 1 or 0
model_nn_woscaler.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
###Output
_____no_output_____
###Markdown
We now set the **hyperparameters** for the optimizer we are using and *compile* the models.
###Code
#Adjust ADAM parameters; the configured instances must be passed to compile(),
#otherwise the string 'adam' silently falls back to the default settings
adam_wscaler = keras.optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, amsgrad=False)
adam_woscaler = keras.optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, amsgrad=False)
#Compile both the models
model_nn_wscaler.compile(loss='binary_crossentropy', optimizer=adam_wscaler, metrics=['accuracy'])
model_nn_woscaler.compile(loss='binary_crossentropy', optimizer=adam_woscaler, metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Now we go ahead and prepare the *training data* for each model.
###Code
#Prepare training data
import numpy as np
X_train_wscaler = np.array(prediction_lr_wscaler.select('features_final').rdd.flatMap(lambda x:x).collect())
X_train_woscaler = np.array(prediction_lr_woscaler.select('features').rdd.flatMap(lambda x:x).collect())
y_train = df.select('motion_final').rdd.flatMap(lambda x:x).collect()
X_train_wscaler
###Output
_____no_output_____
###Markdown
Now that both the models and the data are ready, we can move ahead to *model training*.
###Code
#Train the model that uses scaled data
model_nn_wscaler.fit(X_train_wscaler, y_train, epochs = 10, validation_split = 0.2)
#Train the model that uses unscaled data
model_nn_woscaler.fit(X_train_woscaler, y_train, epochs = 10, validation_split = 0.2)
#Evaluate the model that uses scaled data
evaluate_nn_wscaler = model_nn_wscaler.evaluate(X_train_wscaler, y_train)
nn_acc_wscaler = evaluate_nn_wscaler[1]
#Evaluate the model that uses unscaled data
evaluate_nn_woscaler = model_nn_woscaler.evaluate(X_train_woscaler, y_train)
nn_acc_woscaler = evaluate_nn_woscaler[1]
#Use the trained models to predict values for motion_final
prediction_nn_wscaler = model_nn_wscaler.predict(X_train_wscaler)
prediction_nn_woscaler = model_nn_woscaler.predict(X_train_woscaler)
###Output
_____no_output_____
###Markdown
Let's examine the accuracy obtained by all the models.
###Code
#Print the accuracy of all three models that used scaled data
print("Accuracy of prediction by GBT Classifier with data normalisation is",gbt_acc_wscaler)
print()
print("Accuracy of prediction by Logistic Regression with data normalisation is",lr_acc_wscaler)
print()
print("Accuracy of prediction by Neural Network model with data normalisation is",nn_acc_wscaler)
#Print the accuracy of all three models that used unscaled data
print("Accuracy of prediction by GBT Classifier without data normalisation is",gbt_acc_woscaler)
print()
print("Accuracy of prediction by Logistic Regression without data normalisation is",lr_acc_woscaler)
print()
print("Accuracy of prediction by Neural Network model without data normalisation is",nn_acc_woscaler)
###Output
Accuracy of prediction by GBT Classifier without data normalisation is 0.881578947368421
Accuracy of prediction by Logistic Regression without data normalisation is 0.5263157894736842
Accuracy of prediction by Neural Network model without data normalisation is 0.5263157894736842
###Markdown
We notice that both the *logistic regression model* and the *neural network model* have the same accuracy of about 52.6%, which is quite low. Scrolling up to the predictions of these two models shows that both predict **0 for all values**. We infer that both models have simply learnt to always output 0, so no useful prediction is happening. On the other hand, the *gradient boosted trees classifier* performs as expected, with a decent accuracy of about 88.2%. Hence, we can conclude that the **gradient boosted trees classifier** is more robust to data imbalance. Also, comparing the results *with* and *without scaling the data*, we notice little difference in the performance of the models. This could be justified by the good quality of the dataset. Next, we obtain the **F1 score** of these models to understand how well they are performing.
###Code
#Print F1 score of all three models that used scaled data
from sklearn.metrics import f1_score
log_f1 = f1_score(prediction_lr_wscaler.select('motion_final').collect(), prediction_lr_wscaler.select('prediction').collect())
print("The F1 score for logistic regressor model with data normalisation is:",log_f1)
print()
gbt_f1 = f1_score(prediction_gbt_wscaler.select('motion_final').collect(), prediction_gbt_wscaler.select('prediction').collect())
print("The F1 score for GBT model with data normalisation is:",gbt_f1)
print()
nn_f1 = f1_score(y_train, model_nn_wscaler.predict_classes(X_train_wscaler).reshape((-1,)))
print("The F1 score of the neural network with data normalisation is:",nn_f1)
#Print F1 score of all three models that used unscaled data
log_f1 = f1_score(prediction_lr_woscaler.select('motion_final').collect(), prediction_lr_woscaler.select('prediction').collect())
print("The F1 score for logistic regressor model without data normalisation is:",log_f1)
print()
gbt_f1 = f1_score(prediction_gbt_woscaler.select('motion_final').collect(), prediction_gbt_woscaler.select('prediction').collect())
print("The F1 score for GBT model without data normalisation is:",gbt_f1)
print()
nn_f1 = f1_score(y_train, model_nn_woscaler.predict_classes(X_train_woscaler).reshape((-1,)))
print("The F1 score of the neural network without data normalisation is:",nn_f1)
###Output
The F1 score for logistic regressor model without data normalisation is: 0.0
The F1 score for GBT model without data normalisation is: 0.8758620689655173
The F1 score of the neural network without data normalisation is: 0.0
###Markdown
As expected, only the *GBT classifier* has a non-zero F1 score; the other two models score 0. Hence, we now evaluate the *GBT classifier* alone using a **confusion matrix**.
###Code
#Form the confusion matrix for the GBT Classifier's performance
from sklearn.metrics import confusion_matrix
import seaborn as sns
y_pred = prediction_gbt_wscaler.select('prediction').rdd.flatMap(lambda x:x).collect()
cf_matrix = confusion_matrix(y_train, y_pred)
print(cf_matrix)
#Plot the confusion matrix
sns.set(rc={'figure.figsize':(11.7,8.27), "axes.titlesize":30})
group_names = ["True Neg","False Pos","False Neg","True Pos"]
group_counts = ["{0:0.0f}".format(value) for value in
cf_matrix.flatten()]
group_percentages = ["{0:.2%}".format(value) for value in
cf_matrix.flatten()/np.sum(cf_matrix)]
labels = [f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in
zip(group_names,group_counts,group_percentages)]
labels = np.asarray(labels).reshape(2,2)
sns.heatmap(cf_matrix, annot=labels, fmt="", cmap='Blues').set_title('Confusion Matrix\n')
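# Optional sketch (uncomment to run): precision and recall for the GBT classifier,
# derived from the same labels and predictions used for the confusion matrix
# from sklearn.metrics import precision_score, recall_score
# print("Precision:", precision_score(y_train, y_pred))
# print("Recall:", recall_score(y_train, y_pred))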
###Output
_____no_output_____ |
analysis/Functions.ipynb | ###Markdown
Method ChainsBelow are the functions we created to perform the analysis
###Code
import pandas as pd

def load_and_process(path):

    # Method Chain 1 (load the data, sort it, and deal with missing data)
    df1 = (
        pd.read_csv(path)
        .sort_values("Date", ascending=True)
        .dropna()
        .assign(Percentage_Positive_Population=lambda d: d["Total_Cases"] / d["Population"] * 100)
    )

    # Method Chain 2 (drop columns that are not needed for the analysis)
    df2 = (
        df1
        .drop("Active_Cases", axis=1)
        .drop("Serious_or_Critical", axis=1)
        .drop("Total_Tests", axis=1)
    )

    # Make sure to return the latest dataframe
    return df2
def getDailyCases(df):
    # Daily new cases are the day-over-day change in Total_Cases;
    # the first day has no previous value, so its change is set to 0
    df["New Cases"] = df["Total_Cases"].diff().fillna(0).astype(int)
    return df
def getDailyDeaths(df):
    # Daily new deaths are the day-over-day change in Total_Deaths;
    # the first day has no previous value, so its change is set to 0
    df["New Deaths"] = df["Total_Deaths"].diff().fillna(0).astype(int)
    return df
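# Example usage (a sketch; "covid_data.csv" is a hypothetical path):
# df = load_and_process("covid_data.csv")
# df = getDailyDeaths(getDailyCases(df))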
###Output
_____no_output_____ |
notebooks/13.- Vectores.ipynb | ###Markdown
Vectors
###Code
from sympy import *
init_printing(use_latex='mathjax')
x, y, z = symbols('x, y, z')
###Output
_____no_output_____
###Markdown
In SymPy a vector is simply a row matrix, so to create vectors we must use the **Matrix** constructor (the capital letter is mandatory). The vector is given as a Python list, so its coordinates must go between square brackets and be separated by commas. Parentheses can also be used, but for reasons beyond the scope of this notebook it is better to use square brackets. SymPy can handle vectors of any dimension, but here we will work in dimension 3. Given the vectors $u=(4,3,-1)$ and $v=(2,9,1)$, form some linear combinations with them.
###Code
# Define the vectors from the exercise statement
u = Matrix([4, 3, -1])
v = Matrix([2, 9, 1])
# An example linear combination of u and v
lin_comb = 2*u + 3*v
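# A sketch of the checks requested in the next markdown cell (uncomment to run):
# u.dot(v); u.cross(v)                   # scalar (dot) and vector (cross) products
# u.dot(u) == u.norm()**2                # dot of a vector with itself = squared modulus -> True
# u.cross(v) == -v.cross(u)              # the cross product is antisymmetric -> True
# u.cross(v).dot(u) == 0                 # cross product is perpendicular to each factor -> True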
###Output
_____no_output_____
###Markdown
To compute the scalar (dot) product we use the **dot** method, and for the vector (cross) product, **cross**. For the modulus (or norm) of a vector we use the **norm** method. Compute the dot and cross products of the vectors above. Check that the dot product of a vector with itself is the square of its modulus. Check that the cross product is antisymmetric and that it is perpendicular to each of its factors. The angle between two vectors is obtained by solving for $\alpha$ in the formula:$$\cos(\alpha)=\frac{u \cdot v}{|u| \cdot |v|}$$ Compute the angle formed by $u$ and $v$.
###Code
acos(u.dot(v)/(u.norm()*v.norm())).evalf()
###Output
_____no_output_____
###Markdown
Given the vector $w=(7,5,2)$, check that the cross product is not associative.
###Code
# Check with w = (7, 5, 2) that the cross product is not associative
w = Matrix([7, 5, 2])
not_associative = u.cross(v.cross(w)) != (u.cross(v)).cross(w)  # True: the two groupings differ
###Output
_____no_output_____ |
notebooks/goea_nbt3102_group_results.ipynb | ###Markdown
Group Significant GO terms by Frequently Seen WordsWe use data from a 2014 Nature paper: [Computational analysis of cell-to-cell heterogeneity in single-cell RNA-sequencing data reveals hidden subpopulations of cells](http://www.nature.com/nbt/journal/v33/n2/full/nbt.3102.html#methods) This iPython notebook demonstrates one approach to explore Gene Ontology Enrichment Analysis (GOEA) results:1. Create sub-plots containing significant GO terms which share a common word, like **RNA**.2. Create detailed reports showing all significant GO terms and all study gene symbols for the common word. SummaryThe code in this notebook generates the data used in this statement in the GOATOOLS manuscript: We observed: 93 genes associated with RNA, 47 genes associated with translation, 70 genes associated with mitochondrial or mitochondrian, and 37 genes associated with ribosomal, as reported by GOATOOLS. DetailsDetails summarized here are also found in the file, [nbt3102_GO_word_genes.txt](./doc/nbt3102_GO_word_genes.txt), which is generated by this iPython notebook. * **RNA**: 93 study genes, 6 GOs: 0) BP GO:0006364 rRNA processing (8 genes) 1) MF GO:0003723 RNA binding (32 genes) 2) MF GO:0003729 mRNA binding (11 genes) 3) MF GO:0008097 5S rRNA binding (4 genes) 4) MF GO:0019843 rRNA binding (6 genes) 5) MF GO:0044822 poly(A) RNA binding (86 genes) * **translation**: 47 study genes, 5 GOs: 0) BP GO:0006412 translation (41 genes) 1) BP GO:0006414 translational elongation (7 genes) 2) BP GO:0006417 regulation of translation (9 genes) 3) MF GO:0003746 translation elongation factor activity (5 genes) 4) MF GO:0031369 translation initiation factor binding (4 genes) * **mitochond**: 70 study genes, 8 GOs: 0) BP GO:0051881 regulation of mitochondrial membrane potential (7 genes) 1) CC GO:0000275 mitochondrial proton-transporting ATP synthase complex, catalytic core F(1) (3 genes) 2) CC GO:0005739 mitochondrion (68 genes) 3) CC GO:0005743 mitochondrial inner membrane (28 genes) 4) CC GO:0005747 mitochondrial respiratory chain complex I (5 genes) 5) CC GO:0005753 mitochondrial proton-transporting ATP synthase complex (4 genes) 6) CC GO:0005758 mitochondrial intermembrane space (7 genes) 7) CC GO:0031966 mitochondrial membrane (6 genes) * **ribosomal**: 37 study genes, 6 GOs: 0) BP GO:0000028 ribosomal small subunit assembly (9 genes) 1) BP GO:0042274 ribosomal small subunit biogenesis (6 genes) 2) CC GO:0015934 large ribosomal subunit (4 genes) 3) CC GO:0015935 small ribosomal subunit (13 genes) 4) CC GO:0022625 cytosolic large ribosomal subunit (16 genes) 5) CC GO:0022627 cytosolic small ribosomal subunit (19 genes) Also seen, but not reported in the manuscript: * **ribosome**: 41 study genes, 2 GOs: 0) CC GO:0005840 ribosome (35 genes) 1) MF GO:0003735 structural constituent of ribosome (38 genes) * **adhesion**: 53 study genes, 1 GOs: 0) CC GO:0005925 focal adhesion (53 genes) * **endoplasmic**: 49 study genes, 3 GOs: 0) CC GO:0005783 endoplasmic reticulum (48 genes) 1) CC GO:0005790 smooth endoplasmic reticulum (5 genes) 2) CC GO:0070971 endoplasmic reticulum exit site (4 genes) * **nucleotide**: 46 study genes, 1 GOs: 0) MF GO:0000166 nucleotide binding (46 genes) * **apoptotic**: 42 study genes, 2 GOs: 0) BP GO:0006915 apoptotic process (26 genes) 1) BP GO:0043066 negative regulation of apoptotic process (28 genes) Methodology For this exploration, we choose specific sets of GO terms for each plot based on frequently seen words in the GO term name. 
Examples of GO term names include "*rRNA processing*", "*poly(A) RNA binding*", and "*5S rRNA binding*". The common word for these GO terms is "*RNA*". Steps:1. Run a Gene Ontology Enrichment Analysis.2. Count all words in the significant GO term names.3. Inspect word-count list from step 2.4. Create curated list of words based on frequently seen GO term words.5. Get significant GO terms which contain the words of interest.6. Plot GO terms seen for each word of interest.7. Print a report with full details 1. Run GOEA. Save results.
###Code
%run goea_nbt3102_fncs.ipynb
goeaobj = get_goeaobj_nbt3102('fdr_bh')
# Read Nature data from Excel file (~400 study genes)
studygeneid2symbol = read_data_nbt3102()
# Run Gene Ontology Enrichment Analysis using Benjamini/Hochberg FDR correction
geneids_study = studygeneid2symbol.keys()
goea_results_all = goeaobj.run_study(geneids_study)
goea_results_sig = [r for r in goea_results_all if r.p_fdr_bh < 0.05]
###Output
EXISTS: go-basic.obo
go-basic.obo: fmt(1.2) rel(2019-04-17) 47,398 GO Terms
EXISTS: gene2go
HMS:0:00:06.668335 364,039 annotations READ: gene2go
1 taxids stored: 10090
Load BP Gene Ontology Analysis ...
fisher module not installed. Falling back on scipy.stats.fisher_exact
59% 16,747 of 28,212 population items found in association
Load CC Gene Ontology Analysis ...
fisher module not installed. Falling back on scipy.stats.fisher_exact
65% 18,276 of 28,212 population items found in association
Load MF Gene Ontology Analysis ...
fisher module not installed. Falling back on scipy.stats.fisher_exact
58% 16,418 of 28,212 population items found in association
Run BP Gene Ontology Analysis: current study set of 400 IDs ...
93% 357 of 382 study items found in association
96% 382 of 400 study items found in population(28212)
Calculating 12,189 uncorrected p-values using fisher_scipy_stats
12,189 GO terms are associated with 16,747 of 28,212 population items
2,068 GO terms are associated with 357 of 400 study items
METHOD fdr_bh:
70 GO terms found significant (< 0.05=alpha) ( 68 enriched + 2 purified): statsmodels fdr_bh
230 study items associated with significant GO IDs (enriched)
4 study items associated with significant GO IDs (purified)
Run CC Gene Ontology Analysis: current study set of 400 IDs ...
98% 376 of 382 study items found in association
96% 382 of 400 study items found in population(28212)
Calculating 1,724 uncorrected p-values using fisher_scipy_stats
1,724 GO terms are associated with 18,276 of 28,212 population items
445 GO terms are associated with 376 of 400 study items
METHOD fdr_bh:
92 GO terms found significant (< 0.05=alpha) ( 92 enriched + 0 purified): statsmodels fdr_bh
373 study items associated with significant GO IDs (enriched)
0 study items associated with significant GO IDs (purified)
Run MF Gene Ontology Analysis: current study set of 400 IDs ...
88% 338 of 382 study items found in association
96% 382 of 400 study items found in population(28212)
Calculating 4,128 uncorrected p-values using fisher_scipy_stats
4,128 GO terms are associated with 16,418 of 28,212 population items
581 GO terms are associated with 338 of 400 study items
METHOD fdr_bh:
56 GO terms found significant (< 0.05=alpha) ( 54 enriched + 2 purified): statsmodels fdr_bh
273 study items associated with significant GO IDs (enriched)
0 study items associated with significant GO IDs (purified)
###Markdown
2. Count all words in the significant GO term names. 2a. Get list of significant GO term names
###Code
from __future__ import print_function
go_names = [r.name for r in goea_results_sig]
print(len(go_names))  # Includes ONLY significant results
###Output
218
###Markdown
2b. Get word count in significant GO term names
###Code
import collections as cx
word2cnt = cx.Counter([word for name in go_names for word in name.split()])
###Output
_____no_output_____
###Markdown
3. Inspect word-count list generated at step 2.Words like "mitochondrial" can be interesting. Some words will not be interesting, such as "of".
###Code
# Print 10 most common words found in significant GO term names
print(word2cnt.most_common(10))
###Output
[('binding', 40), ('of', 33), ('regulation', 25), ('protein', 23), ('cell', 20), ('complex', 20), ('activity', 18), ('positive', 15), ('process', 12), ('actin', 12)]
###Markdown
4. Create curated list of words based on frequently seen GO term words.
###Code
freq_seen = ['RNA', 'translation', 'mitochond', 'ribosomal', 'ribosome',
'adhesion', 'endoplasmic', 'nucleotide', 'apoptotic']
###Output
_____no_output_____
###Markdown
5. For each word of interest, create a list of significant GOs whose name contains the word.
###Code
# Collect significant GOs for words in freq_seen (unordered)
word2siggos = cx.defaultdict(set)
# Loop through manually curated words of interest
for word in freq_seen:
# Check each significant GOEA result for the word of interest
for rec in goea_results_sig:
if word in rec.name:
word2siggos[word].add(rec.GO)
# Sort word2gos to have the same order as words in freq_seen
word2siggos = cx.OrderedDict([(w, word2siggos[w]) for w in freq_seen])
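# Quick sanity check (a sketch; uncomment to run): significant GO terms matched per word
# for w in word2siggos:
#     print(w, len(word2siggos[w]))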
###Output
_____no_output_____
###Markdown
6. Plot GO terms seen for each word of interest. 6a. Create a convenient goid-to-goobject dictionary
###Code
goid2goobj_all = {nt.GO:nt.goterm for nt in goea_results_all}
print(len(goid2goobj_all))
###Output
18041
###Markdown
6b. Create plots formed by a shared word in the significant GO term's name
###Code
# Plot set of GOs for each frequently seen word
from goatools.godag_plot import plot_goid2goobj
for word, gos in word2siggos.items():
goid2goobj = {go:goid2goobj_all[go] for go in gos}
plot_goid2goobj(
"nbt3102_word_{WORD}.png".format(WORD=word),
goid2goobj, # source GOs to plot and their GOTerm object
study_items=15, # Max number of gene symbols to print in each GO term
id2symbol=studygeneid2symbol, # Contains GeneID-to-Symbol from Step 1
goea_results=goea_results_all, # pvals used for GO Term coloring
dpi=150)
###Output
6 usr 30 GOs WROTE: nbt3102_word_RNA.png
6 usr 67 GOs WROTE: nbt3102_word_translation.png
11 usr 70 GOs WROTE: nbt3102_word_mitochond.png
6 usr 28 GOs WROTE: nbt3102_word_ribosomal.png
4 usr 19 GOs WROTE: nbt3102_word_ribosome.png
1 usr 7 GOs WROTE: nbt3102_word_adhesion.png
4 usr 34 GOs WROTE: nbt3102_word_endoplasmic.png
1 usr 7 GOs WROTE: nbt3102_word_nucleotide.png
2 usr 16 GOs WROTE: nbt3102_word_apoptotic.png
###Markdown
6c. Example plot for "apoptotic"**Colors**:Please note that to have colors related to GOEA significance, you must provide the GOEA results, as shown here with the "goea_results=goea_results_all" argument. 1. **Levels of Statistical Significance**: 1. **light red** => *extremely significant* fdr_bh values (p<0.005) 2. **orange** => *very significant* fdr_bh values (p<0.01) 3. **yellow** => *significant* fdr_bh values (p<0.05) 4. **grey** => study terms which are ***not*** *statistically significant* (p>0.05)2. **High-level GO terms**: 1. **Cyan** => Level-01 GO terms *Please note* that the variable, *goea_results_all*, contains gene ids and fdr_bh alpha values for **all** study GO terms, significant or not. If the argument had only included the significant results, "goea_results=goea_results_sig", the currently colored grey GO terms would be white and would not have study genes annotated inside.**Gene Symbol Names** Please notice that the study gene symbol names are written in their associated GO term box. Symbol names, rather than gene counts or gene IDs, are used because of the argument, "id2symbol=studygeneid2symbol", to the function, "plot_goid2goobj". 7. Print a report with full details 7a. Create detailed report
###Code
fout = "nbt3102_GO_word_genes.txt"
go2res = {nt.GO:nt for nt in goea_results_all}
with open(fout, "w") as prt:
prt.write("""This file is generated by test_nbt3102.py and is intended to confirm
this statement in the GOATOOLS manuscript:
We observed:
93 genes associated with RNA,
47 genes associated with translation,
70 genes associated with mitochondrial or mitochondrian, and
37 genes associated with ribosomal, as reported by GOATOOLS.
""")
for word, gos in word2siggos.items():
# Sort first by BP, MF, CC. Sort second by GO id.
gos = sorted(gos, key=lambda go: [go2res[go].NS, go])
genes = set()
for go in gos:
genes |= go2res[go].study_items
genes = sorted([studygeneid2symbol[g] for g in genes])
prt.write("\n{WD}: {N} study genes, {M} GOs\n".format(WD=word, N=len(genes), M=len(gos)))
prt.write("{WD} GOs: {GOs}\n".format(WD=word, GOs=", ".join(gos)))
for i, go in enumerate(gos):
res = go2res[go]
prt.write("{I}) {NS} {GO} {NAME} ({N} genes)\n".format(
I=i, NS=res.NS, GO=go, NAME=res.name, N=res.study_count))
prt.write("{N} study genes:\n".format(N=len(genes)))
N = 10 # Number of genes per line
mult = [genes[i:i+N] for i in range(0, len(genes), N)]
prt.write(" {}\n".format("\n ".join([", ".join(str(g) for g in sl) for sl in mult])))
print(" WROTE: {F}\n".format(F=fout))
###Output
WROTE: nbt3102_GO_word_genes.txt
|
radio_stream/splice_audio.ipynb | ###Markdown
Splice AudioThis file contains basic functions for splicing a small piece out of a larger audio file for easy running and debugging with Kaldi
###Code
# Import the AudioSegment class for processing audio and the
# split_on_silence function for separating out silent chunks.
from pydub import AudioSegment
from pydub import effects
from pydub.silence import split_on_silence
from pydub.utils import mediainfo
# Define a function to normalize a chunk to a target amplitude
# and band-pass filter it for Kaldi.
def signal_process_for_kaldi(aChunk, target_dBFS=-20):
    """Normalize a chunk to a target amplitude, then band-pass filter it (200 Hz - 1.6 kHz)."""
    change_in_dBFS = target_dBFS - aChunk.dBFS
    aChunk = aChunk.apply_gain(change_in_dBFS)
    aChunk = aChunk.low_pass_filter(1600)
    aChunk = aChunk.high_pass_filter(200)
    return aChunk
# function to display parameters of a given audio segment
def display_sound_file_parameters(aSegment):
"""display parameters of a given audio segment"""
print("audio length: {} seconds".format(aSegment.duration_seconds))
print("input average dB: {}".format(aSegment.dBFS) )
print("frame rate: {} kHz".format(aSegment.frame_rate/1000 ))
print("channels: {}".format(aSegment.channels))
# define a function to splice out an appropriate audio chunk and write it to a wav file
def splice_out_audio_chunk_by_time(input_sound_file_name, startMin=0, startSec=0,
endMin=0, endSec=10, normalize_to_dBFS=-20.0):
"""splice out an appropriate audio chunk and write it to a wav file"""
# Convert input time to milliseconds
startTime = startMin*60*1000 + startSec*1000
endTime = endMin*60*1000 + endSec*1000
# Load your audio
print("loading input file: {}".format(input_sound_file_name))
input_sound_file = AudioSegment.from_mp3(input_sound_file_name + ".mp3")
# output a few parameters
print("input file parameters:")
display_sound_file_parameters(input_sound_file)
# do very basic scaling for kaldi
input_sound_file = input_sound_file.set_channels(1)
input_sound_file = input_sound_file.set_frame_rate(16000)
# do the splice
extract = input_sound_file[startTime:endTime]
# figure out the output file name - insert start and stop times into the filename
output_file_name = input_sound_file_name \
+ "--" + str(startMin).zfill(2) + str(startSec).zfill(2) \
+ "-" + str(endMin).zfill(2) + str(endSec).zfill(2)
# Normalize the entire chunk.
print("normalizing...")
extract = signal_process_for_kaldi(extract, -20.0)
# output the new parameters
print("output paramaters:")
display_sound_file_parameters(extract)
# output the resulting audio file as a wav
print("writing to disk " + output_file_name + ".wav ...")
extract.export(output_file_name + ".wav", bitrate = "192k", format="wav")
return extract
###Output
_____no_output_____
###Markdown
Run blockHere we try a sample file to see how it is spliced into a smaller segment for easy running and debugging; we specify an exact start and stop time.
###Code
# set your user parameters here - really just the filename of a big mp3 file
# then we'll call functions to split it up
input_sound_file_name = "SWAHILI-NEWS-100820" # mp3 file filename - withOUT the .mp3 suffix
exported_chunk = splice_out_audio_chunk_by_time(input_sound_file_name, # use the filename set above
startMin = 10, # start time for splice
startSec = 16,
endMin = 10, # end time for splice
endSec = 34)
print("done")
# run this to get parameters and information about a wav file
#sf = AudioSegment.from_wav("wave_file_name_here.wav")
#display_sound_file_parameters(sf)
###Output
audio length: 5.22 seconds
input average dB: -19.015677414152567
frame rate: 16.0 kHz
channels: 1
|
NumPy Mini-Project/solve_myself.ipynb | ###Markdown
Mean NormalizationIn machine learning we use large amounts of data to train our models. Some machine learning algorithms may require that the data is *normalized* in order to work correctly. The idea of normalization, also known as *feature scaling*, is to ensure that all the data is on a similar scale, *i.e.* that all the data takes on a similar range of values. For example, we might have a dataset that has values between 0 and 5,000. By normalizing the data we can make the range of values be between 0 and 1.In this lab, you will be performing a different kind of feature scaling known as *mean normalization*. Mean normalization will scale the data, but instead of making the values be between 0 and 1, it will distribute the values evenly in some small interval around zero. For example, if we have a dataset that has values between 0 and 5,000, after mean normalization the range of values will be distributed in some small range around 0, for example between -3 to 3. Because the range of values are distributed evenly around zero, this guarantees that the average (mean) of all elements will be zero. Therefore, when you perform *mean normalization* your data will not only be scaled but it will also have an average of zero. To Do:You will start by importing NumPy and creating a rank 2 ndarray of random integers between 0 and 5,000 (inclusive) with 1000 rows and 20 columns. This array will simulate a dataset with a wide range of values. Fill in the code below
###Code
# import NumPy into Python
import numpy as np
# Create a 1000 x 20 ndarray with random integers in the half-open interval [0, 5001).
X = np.random.randint(0,5001,(1000, 20))
# print the shape of X
print(X.shape)
###Output
(1000, 20)
###Markdown
Now that you created the array we will mean normalize it. We will perform mean normalization using the following equation:$\mbox{Norm_Col}_i = \frac{\mbox{Col}_i - \mu_i}{\sigma_i}$where $\mbox{Col}_i$ is the $i$th column of $X$, $\mu_i$ is average of the values in the $i$th column of $X$, and $\sigma_i$ is the standard deviation of the values in the $i$th column of $X$. In other words, mean normalization is performed by subtracting from each column of $X$ the average of its values, and then by dividing by the standard deviation of its values. In the space below, you will first calculate the average and standard deviation of each column of $X$.
###Code
# Average of the values in each column of X
ave_cols = np.mean(X, axis=0)
# Standard Deviation of the values in each column of X
std_cols = np.std(X, axis=0)
###Output
_____no_output_____
###Markdown
If you have done the above calculations correctly, then `ave_cols` and `std_cols`, should both be vectors with shape `(20,)` since $X$ has 20 columns. You can verify this by filling the code below:
###Code
# Print the shape of ave_cols
print(ave_cols.shape)
# Print the shape of std_cols
print(std_cols.shape)
###Output
(20,)
(20,)
###Markdown
You can now take advantage of Broadcasting to calculate the mean normalized version of $X$ in just one line of code using the equation above. Fill in the code below
###Code
# Mean normalize X
X_norm = (X - ave_cols) / std_cols
print(X_norm)
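# Sanity check (a sketch; uncomment to run): each column should now have mean ~0 and std ~1
# print(np.allclose(X_norm.mean(axis=0), 0), np.allclose(X_norm.std(axis=0), 1))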
###Output
[[ 1.0811685 -0.36393799 -1.2145216 ... -0.9857293 -0.2607601
1.10453325]
[ 0.2449124 -0.49897122 0.11205226 ... -1.67028025 1.68802953
-1.65699141]
[ 0.7730027 0.54504882 0.42810565 ... -0.30896538 0.29553076
0.75566588]
...
[-0.54654427 -1.73985548 0.3650367 ... 0.99288487 0.50670869
-0.75814489]
[ 0.76349979 0.50453885 0.79022063 ... -0.25020868 0.96166249
-0.29640866]
[-0.50378118 0.53012409 0.01213404 ... -0.81370667 -0.47689859
-1.07349363]]
###Markdown
If you have performed the mean normalization correctly, then the average of all the elements in $X_{\tiny{\mbox{norm}}}$ should be close to zero, and they should be evenly distributed in some small interval around zero. You can verify this by filing the code below:
###Code
# Print the average of all the values of X_norm
print(np.mean(X_norm))
# Print the average of the minimum value in each column of X_norm
print(X_norm.min(axis =0).mean())
# Print the average of the maximum value in each column of X_norm
print(X_norm.max(axis = 0).mean())
###Output
1.0835776720341529e-17
-1.7317171444518529
1.7389792299797162
###Markdown
You should note that since $X$ was created using random integers, the above values will vary. Data SeparationAfter the data has been mean normalized, it is customary in machine learning to split our dataset into three sets:1. A Training Set2. A Cross Validation Set3. A Test SetThe dataset is usually divided such that the Training Set contains 60% of the data, the Cross Validation Set contains 20% of the data, and the Test Set contains 20% of the data. In this part of the lab you will separate `X_norm` into a Training Set, Cross Validation Set, and a Test Set. Each data set will contain rows of `X_norm` chosen at random, making sure that we don't pick the same row twice. This will guarantee that all the rows of `X_norm` are chosen and randomly distributed among the three new sets.You will start by creating a rank 1 ndarray that contains a random permutation of the row indices of `X_norm`. You can do this by using the `np.random.permutation()` function. The `np.random.permutation(N)` function creates a random permutation of integers from 0 to `N - 1`. Let's see an example:
###Code
# We create a random permutation of integers 0 to 4
np.random.permutation(5)
###Output
_____no_output_____
###Markdown
To DoIn the space below create a rank 1 ndarray that contains a random permutation of the row indices of `X_norm`. You can do this in one line of code by extracting the number of rows of `X_norm` using the `shape` attribute and then passing it to the `np.random.permutation()` function. Remember the `shape` attribute returns a tuple with two numbers in the form `(rows,columns)`.
###Code
# Create a rank 1 ndarray that contains a random permutation of the row indices of `X_norm`
row_indices = np.random.permutation(X_norm.shape[0])
###Output
_____no_output_____
###Markdown
Now you can create the three datasets using the `row_indices` ndarray to select the rows that will go into each dataset. Remember that the Training Set contains 60% of the data, the Cross Validation Set contains 20% of the data, and the Test Set contains 20% of the data. Each set requires just one line of code to create. Fill in the code below
###Code
# Make any necessary calculations.
# You can save your calculations into variables to use later.
# Create a Training Set
X_train = X_norm[row_indices[:int(len(X_norm) * 0.6)], :]
# Create a Cross Validation Set
X_crossVal = X_norm[row_indices[int(len(X_norm) * 0.6): int(len(X_norm) * 0.8)], :]
# Create a Test Set
X_test = X_norm[row_indices[int(len(X_norm) * 0.8):], :]
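# An equivalent one-liner (a sketch), splitting the shuffled rows at the 60% and 80% marks:
# X_train, X_crossVal, X_test = np.split(X_norm[row_indices], [600, 800])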
###Output
_____no_output_____
###Markdown
If you performed the above calculations correctly, then `X_train` should have 600 rows and 20 columns, `X_crossVal` should have 200 rows and 20 columns, and `X_test` should have 200 rows and 20 columns. You can verify this by filling the code below:
###Code
# Print the shape of X_train
print(X_train.shape)
# Print the shape of X_crossVal
print(X_crossVal.shape)
# Print the shape of X_test
print(X_test.shape)
###Output
(600, 20)
(200, 20)
(200, 20)
|
tutorials/W2D4_DynamicNetworks/W2D4_Tutorial1.ipynb | ###Markdown
Tutorial 1: Neural Rate Models**Week 2, Day 4: Dynamic Networks****By Neuromatch Academy**__Content creators:__ Qinglong Gu, Songtin Li, Arvind Kumar, John Murray, Julijana Gjorgjieva __Content reviewers:__ Maryam Vaziri-Pashkam, Ella Batty, Lorenzo Fontolan, Richard Gao, Spiros Chavlis, Michael Waskom, Siddharth Suresh **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial Objectives*Estimated timing of tutorial: 1 hour, 25 minutes*The brain is a complex system, not because it is composed of a large number of diverse types of neurons, but mainly because of how neurons are connected to each other. The brain is indeed a network of highly specialized neuronal networks. The activity of a neural network constantly evolves in time. For this reason, neurons can be modeled as dynamical systems. The dynamical system approach is only one of the many modeling approaches that computational neuroscientists have developed (other points of view include information processing, statistical models, etc.). How the dynamics of neuronal networks affect the representation and processing of information in the brain is an open question. However, signatures of altered brain dynamics present in many brain diseases (e.g., in epilepsy or Parkinson's disease) tell us that it is crucial to study network activity dynamics if we want to understand the brain.In this tutorial, we will simulate and study one of the simplest models of biological neuronal networks. Instead of modeling and simulating individual excitatory neurons (e.g., LIF models that you implemented yesterday), we will treat them as a single homogeneous population and approximate their dynamics using a single one-dimensional equation describing the evolution of their average spiking rate in time.In this tutorial, we will learn how to build a firing rate model of a single population of excitatory neurons. **Steps:**- Write the equation for the firing rate dynamics of a 1D excitatory population.- Visualize the response of the population as a function of parameters such as threshold level and gain, using the frequency-current (F-I) curve.- Numerically simulate the dynamics of the excitatory population and find the fixed points of the system.
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/nvuty/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- Setup
###Code
# Imports
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt # root-finding algorithm
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Plotting Functions
def plot_fI(x, f):
plt.figure(figsize=(6, 4)) # plot the figure
plt.plot(x, f, 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
def plot_dr_r(r, drdt, x_fps=None):
plt.figure()
plt.plot(r, drdt, 'k')
plt.plot(r, 0. * r, 'k--')
if x_fps is not None:
plt.plot(x_fps, np.zeros_like(x_fps), "ko", ms=12)
plt.xlabel(r'$r$')
plt.ylabel(r'$\frac{dr}{dt}$', fontsize=20)
plt.ylim(-0.1, 0.1)
def plot_dFdt(x, dFdt):
plt.figure()
plt.plot(x, dFdt, 'r')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('dF(x)', fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
--- Section 1: Neuronal network dynamics
###Code
# @title Video 1: Dynamic networks
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1dh411o7qJ", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="p848349hPyw", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This video covers how to model a network with a single population of neurons and introduces neural rate-based models. It overviews feedforward networks and defines the F-I (firing rate vs. input) curve. Section 1.1: Dynamics of a single excitatory population Click here for text recap of relevant part of video Individual neurons respond by spiking. When we average the spikes of neurons in a population, we can define the average firing activity of the population. In this model, we are interested in how the population-averaged firing varies as a function of time and network parameters. Mathematically, we can describe the firing rate dynamics of a feed-forward network as:\begin{align}\tau \frac{dr}{dt} &= -r + F(I_{\text{ext}}) \quad\qquad (1)\end{align}$r(t)$ represents the average firing rate of the excitatory population at time $t$, $\tau$ controls the timescale of the evolution of the average firing rate, $I_{\text{ext}}$ represents the external input, and the transfer function $F(\cdot)$ (which can be related to the f-I curve of individual neurons described in the next sections) represents the population activation function in response to all received inputs.To start building the model, please execute the cell below to initialize the simulation parameters.
###Code
# @markdown *Execute this cell to set default parameters for a single excitatory population model*
def default_pars_single(**kwargs):
pars = {}
# Excitatory parameters
pars['tau'] = 1. # Timescale of the E population [ms]
pars['a'] = 1.2 # Gain of the E population
pars['theta'] = 2.8 # Threshold of the E population
# Connection strength
pars['w'] = 0. # E to E, we first set it to 0
# External input
pars['I_ext'] = 0.
# simulation parameters
pars['T'] = 20. # Total duration of simulation [ms]
pars['dt'] = .1 # Simulation time step [ms]
pars['r_init'] = 0.2 # Initial value of E
# External parameters if any
pars.update(kwargs)
# Vector of discretized time points [ms]
pars['range_t'] = np.arange(0, pars['T'], pars['dt'])
return pars
pars = default_pars_single()
print(pars)
###Output
_____no_output_____
###Markdown
You can now use:- `pars = default_pars_single()` to get all the parameters. - `pars = default_pars_single(T=T_sim, dt=time_step)` to set new simulation time and time step- To update an existing parameter dictionary, use `pars['New_para'] = value`. Because `pars` is a dictionary, it can be passed to a function that requires individual parameters as arguments using `my_func(**pars)` syntax. Section 1.2: F-I curves*Estimated timing to here from start of tutorial: 17 min* Click here for text recap of relevant part of video In electrophysiology, a neuron is often characterized by its spike rate output in response to input currents. This is often called the **F-I** curve, denoting the output spike frequency (**F**) in response to different injected currents (**I**). We estimated this for an LIF neuron in yesterday's tutorial. The transfer function $F(\cdot)$ in Equation $1$ represents the gain of the population as a function of the total input. The gain is often modeled as a sigmoidal function, i.e., more input drive leads to a nonlinear increase in the population firing rate. The output firing rate will eventually saturate for high input values. A sigmoidal $F(\cdot)$ is parameterized by its gain $a$ and threshold $\theta$.$$ F(x;a,\theta) = \frac{1}{1+\text{e}^{-a(x-\theta)}} - \frac{1}{1+\text{e}^{a\theta}} \quad(2)$$The argument $x$ represents the input to the population. Note that the second term is chosen so that $F(0;a,\theta)=0$. Many other transfer functions (generally monotonic) can also be used. Examples are the rectified linear function $ReLU(x)$ or the hyperbolic tangent $tanh(x)$. Coding Exercise 1.2: Implement F-I curve Let's first investigate the activation functions before simulating the dynamics of the entire population. In this exercise, you will implement a sigmoidal **F-I** curve or transfer function $F(x)$, with gain $a$ and threshold level $\theta$ as parameters:$$F(x;a,\theta) = \frac{1}{1+\text{e}^{-a(x-\theta)}} - \frac{1}{1+\text{e}^{a\theta}}$$
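As a quick sanity check on the offset term in Equation (2), evaluating it at $x=0$ shows why the transfer function vanishes there:$$F(0;a,\theta) = \frac{1}{1+\text{e}^{-a(0-\theta)}} - \frac{1}{1+\text{e}^{a\theta}} = \frac{1}{1+\text{e}^{a\theta}} - \frac{1}{1+\text{e}^{a\theta}} = 0$$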
###Code
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
#################################################
## TODO for students: compute f = F(x) ##
# Fill out function and remove
raise NotImplementedError("Student exercise: implement the f-I function")
#################################################
# Define the sigmoidal transfer function f = F(x)
f = ...
return f
# Set parameters
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Compute transfer function
f = F(x, pars['a'], pars['theta'])
# Visualize
plot_fI(x, f)
# to_remove solution
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
# Define the sigmoidal transfer function f = F(x)
f = (1 + np.exp(-a * (x - theta)))**-1 - (1 + np.exp(a * theta))**-1
return f
# Set parameters
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Compute transfer function
f = F(x, pars['a'], pars['theta'])
# Visualize
with plt.xkcd():
plot_fI(x, f)
###Output
_____no_output_____
###Markdown
Interactive Demo 1.2: Parameter exploration of F-I curve Here's an interactive demo that shows how the F-I curve changes for different values of the gain and threshold parameters. 1. How does the gain parameter ($a$) affect the F-I curve? 2. How does the threshold parameter ($\theta$) affect the F-I curve?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def interactive_plot_FI(a, theta):
"""
Population activation function.
Args:
a : the gain of the function
theta : the threshold of the function
Returns:
plot of the F-I curve with the given parameters
"""
# set the range of input
x = np.arange(0, 10, .1)
plt.figure()
plt.plot(x, F(x, a, theta), 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
_ = widgets.interact(interactive_plot_FI, a=(0.3, 3, 0.3), theta=(2, 4, 0.2))
# to_remove explanation
"""
1) a determines the slope (gain) of the rising phase of the F-I curve
2) theta determines the input at which the function F(x) reaches its mid-value (0.5).
That is, theta shifts the F-I curve along the horizontal axis.
For our neurons we are using in this tutorial:
- a controls the gain of the neuron population
- theta controls the threshold at which the neuron population starts to respond
""";
###Output
_____no_output_____
###Markdown
Section 1.3: Simulation scheme of E dynamics*Estimated timing to here from start of tutorial: 27 min* Because $F(\cdot)$ is a nonlinear function, the exact solution of our differential equation of population activity cannot be determined via analytical methods. As we have seen before, we can use numerical methods, specifically the Euler method, to find the solution (that is, simulate the population activity).
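Concretely, the update rule implemented in `simulate_single` below advances the activity by one Euler step of size $\Delta t$:$$r[k+1] = r[k] + \frac{\Delta t}{\tau}\big[-r[k] + F(w\cdot r[k] + I_{\text{ext}}[k];a,\theta)\big]$$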
###Code
# @markdown *Execute this cell to enable the single population rate model simulator: `simulate_single`*
def simulate_single(pars):
"""
Simulate an excitatory population of neurons
Args:
pars : Parameter dictionary
Returns:
rE : Activity of excitatory population (array)
Example:
pars = default_pars_single()
r = simulate_single(pars)
"""
# Set parameters
tau, a, theta = pars['tau'], pars['a'], pars['theta']
w = pars['w']
I_ext = pars['I_ext']
r_init = pars['r_init']
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
# Initialize activity
r = np.zeros(Lt)
r[0] = r_init
I_ext = I_ext * np.ones(Lt)
# Update the E activity
for k in range(Lt - 1):
dr = dt / tau * (-r[k] + F(w * r[k] + I_ext[k], a, theta))
r[k+1] = r[k] + dr
return r
help(simulate_single)
###Output
_____no_output_____
###Markdown
Interactive Demo 1.3: Parameter Exploration of single population dynamics Explore the dynamics of the population activity in this interactive demo. 1. How does $r_{\text{sim}}(t)$ change with different $I_{\text{ext}}$ values? 2. How does it change with different $\tau$ values? Note that $r_{\rm ana}(t)$ denotes the analytical solution - you will learn how this is computed in the next section.
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
# get default parameters
pars = default_pars_single(T=20.)
def Myplot_E_diffI_difftau(I_ext, tau):
# set external input and time constant
pars['I_ext'] = I_ext
pars['tau'] = tau
# simulation
r = simulate_single(pars)
# Analytical Solution
r_ana = (pars['r_init']
+ (F(I_ext, pars['a'], pars['theta'])
- pars['r_init']) * (1. - np.exp(-pars['range_t'] / pars['tau'])))
# plot
plt.figure()
plt.plot(pars['range_t'], r, 'b', label=r'$r_{\mathrm{sim}}$(t)', alpha=0.5,
zorder=1)
plt.plot(pars['range_t'], r_ana, 'b--', lw=5, dashes=(2, 2),
label=r'$r_{\mathrm{ana}}$(t)', zorder=2)
plt.plot(pars['range_t'],
F(I_ext, pars['a'], pars['theta']) * np.ones(pars['range_t'].size),
'k--', label=r'$F(I_{\mathrm{ext}})$')
plt.xlabel('t (ms)', fontsize=16.)
plt.ylabel('Activity r(t)', fontsize=16.)
plt.legend(loc='best', fontsize=14.)
plt.show()
_ = widgets.interact(Myplot_E_diffI_difftau, I_ext=(0.0, 10., 1.),
tau=(1., 5., 0.2))
# to_remove explanation
"""
1) Weak inputs to the neurons eventually result in the activity converging to zero.
Strong inputs to the neurons eventually result in the activity converging to the max value.
2) The time constant tau does not affect the final response reached, but it determines
the time the neurons take to reach their fixed point.
""";
###Output
_____no_output_____
###Markdown
Think! 1.3: Finite activities Above, we have numerically solved a system driven by a positive input. Yet, $r_E(t)$ either decays to zero or reaches a fixed non-zero value. 1. Why doesn't the solution of the system "explode" in a finite time? In other words, what guarantees that $r_E(t)$ stays finite? 2. Which parameter would you change in order to increase the maximum value of the response?
###Code
# to_remove explanation
"""
1) As the F-I curve is bounded between zero and one, the system doesn't explode.
The bounded F-I curve guarantees this property.
2) One way to increase the maximum response is to change the f-I curve. For
example, the ReLU is an unbounded function, and thus will increase the overall maximal
response of the network.
""";
###Output
_____no_output_____
###Markdown
--- Section 2: Fixed points of the single population system*Estimated timing to here from start of tutorial: 45 min* Section 2.1: Finding fixed points
###Code
# @title Video 2: Fixed point
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1v54y1v7Gr", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Ox3ELd1UFyo", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This video introduces recurrent networks and how to derive their fixed points. It also introduces vector fields and phase planes in one dimension. Click here for text recap of video We can now extend our feed-forward network to a recurrent network, governed by the equation:\begin{align}\tau \frac{dr}{dt} &= -r + F(w\cdot r + I_{\text{ext}}) \quad\qquad (3)\end{align}where as before, $r(t)$ represents the average firing rate of the excitatory population at time $t$, $\tau$ controls the timescale of the evolution of the average firing rate, $I_{\text{ext}}$ represents the external input, and the transfer function $F(\cdot)$ (which can be related to the f-I curve of individual neurons described in the previous sections) represents the population activation function in response to all received inputs. Now we also have $w$, which denotes the strength (synaptic weight) of the recurrent input to the population. As you varied the two parameters in the last Interactive Demo, you noticed that, while at first the system output quickly changes, with time it reaches its maximum/minimum value and does not change anymore. The value eventually reached by the system is called the **steady state** of the system, or the **fixed point**. Essentially, in the steady states the derivative with respect to time of the activity ($r$) is zero, i.e. $\displaystyle \frac{dr}{dt}=0$. We can find the steady state of Equation (3) by setting $\displaystyle{\frac{dr}{dt}=0}$ and solving for $r$:\begin{equation}-r_{\text{steady}} + F(w\cdot r_{\text{steady}} + I_{\text{ext}};a,\theta) = 0 \qquad (4)\end{equation}When it exists, the solution of Equation (4) defines a **fixed point** of the dynamical system in Equation (3). Note that if $F(x)$ is nonlinear, it is not always possible to find an analytical solution, but the solution can be found via numerical simulations, as we will do later. From the Interactive Demo, one could also notice that the value of $\tau$ influences how quickly the activity will converge to the steady state from its initial value. In the specific case of $w=0$, we can also analytically compute the solution of Equation (1) (i.e., the thick blue dashed line) and deduce the role of $\tau$ in determining the convergence to the fixed point: \begin{equation}\displaystyle{r(t) = \big{[}F(I_{\text{ext}};a,\theta) -r(t=0)\big{]} (1-\text{e}^{-\frac{t}{\tau}})} + r(t=0)\end{equation}We can now numerically calculate the fixed point with a root finding algorithm. Coding Exercise 2.1.1: Visualization of the fixed points When it is not possible to find the solution for Equation (3) analytically, a graphical approach can be taken. To that end, it is useful to plot $\displaystyle{\frac{dr}{dt}}$ as a function of $r$. The values of $r$ for which the plotted function crosses zero on the y axis correspond to fixed points. Here, let us, for example, set $w=5.0$ and $I_{\text{ext}}=0.5$. From Equation (3), you can obtain $$\frac{dr}{dt} = [-r + F(w\cdot r + I_{\text{ext}})]\,/\,\tau$$Then, plot $dr/dt$ as a function of $r$, and check for the presence of fixed points.
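As a quick side check (a sketch, not part of the exercise): for $w=0$ the fixed point is simply $F(I_{\text{ext}};a,\theta)$, so the simulator should settle there. This assumes `F`, `default_pars_single`, and `simulate_single` from the cells above:
```
# Sanity check: for w = 0 the steady state should equal F(I_ext; a, theta)
pars = default_pars_single(T=50., w=0., I_ext=3.)
r = simulate_single(pars)
r_steady = F(pars['I_ext'], pars['a'], pars['theta'])
print(f"r(T) = {r[-1]:.4f}, F(I_ext) = {r_steady:.4f}")  # should agree closely
```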
###Code
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
#########################################################################
# TODO compute drdt and disable the error
raise NotImplementedError("Finish the compute_drdt function")
#########################################################################
# Calculate drdt
drdt = ...
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Compute dr/dt
drdt = compute_drdt(r, **pars)
# Visualize
plot_dr_r(r, drdt)
# to_remove solution
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
# Calculate drdt
drdt = (-r + F(w * r + I_ext, a, theta)) / tau
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Compute dr/dt
drdt = compute_drdt(r, **pars)
# Visualize
with plt.xkcd():
plot_dr_r(r, drdt)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1.2: Numerical calculation of fixed points We will now find the fixed points numerically. To do so, we need to specify initial values ($r_{\text{guess}}$) for the root-finding algorithm to start from. From the line $\displaystyle{\frac{dr}{dt}}$ plotted above in the last exercise, initial values can be chosen as a set of values close to where the line crosses zero on the y axis (i.e., near the real fixed points). The next cell defines three helper functions that we will use:- `my_fp_single(r_guess, **pars)` uses a root-finding algorithm to locate a fixed point near a given initial value- `check_fp_single(x_fp, **pars)` verifies that the values of $r_{\rm fp}$ for which $\displaystyle{\frac{dr}{dt}} = 0$ are the true fixed points- `my_fp_finder(r_guess_vector, **pars)` accepts an array of initial values and finds the same number of fixed points, using the above two functions
###Code
# @markdown *Execute this cell to enable the fixed point functions*
def my_fp_single(r_guess, a, theta, w, I_ext, **other_pars):
"""
Calculate the fixed point through drE/dt=0
Args:
r_guess : Initial value used for scipy.optimize function
a, theta, w, I_ext : simulation parameters
Returns:
x_fp : value of fixed point
"""
# define the right hand of E dynamics
def my_WCr(x):
r = x
drdt = (-r + F(w * r + I_ext, a, theta))
y = np.array(drdt)
return y
x0 = np.array(r_guess)
x_fp = opt.root(my_WCr, x0).x.item()
return x_fp
def check_fp_single(x_fp, a, theta, w, I_ext, mytol=1e-4, **other_pars):
"""
Verify |dr/dt| < mytol
Args:
x_fp : value of fixed point
a, theta, w, I_ext: simulation parameters
mytol : tolerance, default as 10^{-4}
Returns :
Whether it is a correct fixed point: True/False
"""
# calculate Equation(3)
y = x_fp - F(w * x_fp + I_ext, a, theta)
# Here we set tolerance as 10^{-4}
return np.abs(y) < mytol
def my_fp_finder(pars, r_guess_vector, mytol=1e-4):
"""
Calculate the fixed point(s) through drE/dt=0
Args:
pars : Parameter dictionary
r_guess_vector : Initial values used for scipy.optimize function
mytol : tolerance for checking fixed point, default as 10^{-4}
Returns:
x_fps : values of fixed points
"""
x_fps = []
for r_guess in r_guess_vector:
x_fp = my_fp_single(r_guess, **pars)
if check_fp_single(x_fp, **pars, mytol=mytol):
x_fps.append(x_fp)
return x_fps
help(my_fp_finder)
# Set parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Compute dr/dt
drdt = compute_drdt(r, **pars)
#############################################################################
# TODO for students:
# Define initial values close to the intersections of drdt and y=0
# (How many initial values? Hint: How many times do the two lines intersect?)
# Calculate the fixed point with these initial values and plot them
raise NotImplementedError('student_exercise: find fixed points numerically')
#############################################################################
# Initial guesses for fixed points
r_guess_vector = [...]
# Find fixed point numerically
x_fps = my_fp_finder(pars, r_guess_vector)
# Visualize
plot_dr_r(r, drdt, x_fps)
# to_remove solution
# Set parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Compute dr/dt
drdt = compute_drdt(r, **pars)
# Initial guesses for fixed points
r_guess_vector = [0, .4, .9]
# Find fixed point numerically
x_fps = my_fp_finder(pars, r_guess_vector)
# Visualize
with plt.xkcd():
plot_dr_r(r, drdt, x_fps)
###Output
_____no_output_____
###Markdown
Interactive Demo 2.1: Fixed points as a function of recurrent and external inputs. You can now explore how the previous plot changes when the recurrent coupling $w$ and the external input $I_{\text{ext}}$ take different values. How does the number of fixed points change?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def plot_intersection_single(w, I_ext):
# set your parameters
pars = default_pars_single(w=w, I_ext=I_ext)
# find fixed points
r_init_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_init_vector)
# plot
r = np.linspace(0, 1., 1000)
drdt = (-r + F(w * r + I_ext, pars['a'], pars['theta'])) / pars['tau']
plot_dr_r(r, drdt, x_fps)
_ = widgets.interact(plot_intersection_single, w=(1, 7, 0.2),
I_ext=(0, 3, 0.1))
# to_remove explanation
"""
The fixed points of the single excitatory neuron population are determined by both
recurrent connections w and external input I_ext. In a previous interactive demo
we saw how the system showed two different steady states when w = 0. But when w
does not equal 0, for some range of w the system shows three fixed points, and the
steady state depends on the initial conditions (i.e.,
r at time zero).
""";
###Output
_____no_output_____
###Markdown
Section 2.2: Relationship between trajectories & fixed points Let's examine the relationship between the population activity over time and the fixed points. Here, let us first set $w=5.0$ and $I_{\text{ext}}=0.5$, and investigate the dynamics of $r(t)$ starting with different initial values $r(0) \equiv r_{\text{init}}$.
###Code
# @markdown Execute to visualize dr/dt
def plot_intersection_single(w, I_ext):
# set your parameters
pars = default_pars_single(w=w, I_ext=I_ext)
# find fixed points
r_init_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_init_vector)
# plot
r = np.linspace(0, 1., 1000)
drdt = (-r + F(w * r + I_ext, pars['a'], pars['theta'])) / pars['tau']
plot_dr_r(r, drdt, x_fps)
plot_intersection_single(w = 5.0, I_ext = 0.5)
###Output
_____no_output_____
###Markdown
Interactive Demo 2.2: dynamics as a function of the initial value Let's now set $r_{\rm init}$ to a value of your choice in this demo. How does the solution change? What do you observe? How does that relate to the previous plot of $\frac{dr}{dt}$?
###Code
# @markdown Make sure you execute this cell to enable the widget!
pars = default_pars_single(w=5.0, I_ext=0.5)
def plot_single_diffEinit(r_init):
pars['r_init'] = r_init
r = simulate_single(pars)
plt.figure()
plt.plot(pars['range_t'], r, 'b', zorder=1)
plt.plot(0, r[0], 'bo', alpha=0.7, zorder=2)
plt.xlabel('t (ms)', fontsize=16)
plt.ylabel(r'$r(t)$', fontsize=16)
plt.ylim(0, 1.0)
plt.show()
_ = widgets.interact(plot_single_diffEinit, r_init=(0, 1, 0.02))
# to_remove explanation
"""
There are three fixed points of the system for these values of
w and I_ext. Choose an initial value in this demo and see in which direction
the system output moves. When r_init is in the vicinity of the leftmost fixed point,
it moves towards the leftmost fixed point. When r_init is in the vicinity of the
rightmost fixed point, it moves towards the rightmost fixed point.
""";
###Output
_____no_output_____
###Markdown
We will plot the trajectories of $r(t)$ with $r_{\text{init}} = 0.0, 0.1, 0.2,..., 0.9$.
###Code
# @markdown Execute this cell to see the trajectories!
pars = default_pars_single()
pars['w'] = 5.0
pars['I_ext'] = 0.5
plt.figure(figsize=(8, 5))
for ie in range(10):
pars['r_init'] = 0.1 * ie # set the initial value
r = simulate_single(pars) # run the simulation
# plot the activity with given initial
plt.plot(pars['range_t'], r, 'b', alpha=0.1 + 0.1 * ie,
label=r'r$_{\mathrm{init}}$=%.1f' % (0.1 * ie))
plt.xlabel('t (ms)')
plt.title('Two steady states?')
plt.ylabel(r'$r$(t)')
plt.legend(loc=[1.01, -0.06], fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
We have three fixed points but only two steady states showing up - what's happening? It turns out that the stability of the fixed points matters. If a fixed point is stable, a trajectory starting near that fixed point will stay close to it and converge to it (the steady state will equal the fixed point). If a fixed point is unstable, any trajectory starting close to it will diverge and head towards one of the stable fixed points. In fact, the only way for a trajectory to remain at an unstable fixed point is for the initial value to **exactly** equal the value of the fixed point. Think! 2.2: Stable vs unstable fixed points Which of the fixed points for the model we've been examining in this section are stable vs unstable?
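To see this numerically, here is a minimal perturbation probe (a sketch using `default_pars_single`, `my_fp_finder`, and `simulate_single` from above): start slightly to either side of each fixed point and check where the trajectory settles.
```
# Perturb each fixed point slightly and see where the trajectory settles
pars = default_pars_single(w=5.0, I_ext=0.5)
x_fps = my_fp_finder(pars, [0, .4, .9])
for fp in x_fps:
  for eps in (-0.02, 0.02):
    pars['r_init'] = fp + eps
    r = simulate_single(pars)
    print(f"start at {fp + eps:.3f} -> settle at {r[-1]:.3f}")
```
Starts near the outer fixed points settle back onto them, while starts near the middle fixed point drift away towards one of the outer two.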
###Code
# @markdown Execute to print fixed point values
# Initial guesses for fixed points
r_guess_vector = [0, .4, .9]
# Find fixed point numerically
x_fps = my_fp_finder(pars, r_guess_vector)
print(f'Our fixed points are {x_fps}')
# to_remove explanation
"""
Trajectories converge to either the first or the third fixed point, so those
two are stable. The second fixed point is unstable, as we do not see trajectories
converging to it.
""";
###Output
_____no_output_____
###Markdown
We can simulate the trajectory if we start at the unstable fixed point: you can see that it remains at that fixed point (the red line below).
###Code
# @markdown Execute to visualize trajectory starting at unstable fixed point
pars = default_pars_single()
pars['w'] = 5.0
pars['I_ext'] = 0.5
plt.figure(figsize=(8, 5))
for ie in range(10):
pars['r_init'] = 0.1 * ie # set the initial value
r = simulate_single(pars) # run the simulation
# plot the activity with given initial
plt.plot(pars['range_t'], r, 'b', alpha=0.1 + 0.1 * ie,
label=r'r$_{\mathrm{init}}$=%.1f' % (0.1 * ie))
pars['r_init'] = x_fps[1] # set the initial value
r = simulate_single(pars) # run the simulation
# plot the activity with given initial
plt.plot(pars['range_t'], r, 'r',
label=r'r$_{\mathrm{init}}$=%.4f' % (x_fps[1]))
plt.xlabel('t (ms)')
plt.title('Two steady states?')
plt.ylabel(r'$r$(t)')
plt.legend(loc=[1.01, -0.06], fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
See Bonus Section 1 for how to determine the stability of fixed points in a quantitative way. Think! 2: Inhibitory populations Throughout the tutorial, we have assumed $w > 0$, i.e., we considered a single population of **excitatory** neurons. What do you think will be the behavior of a population of inhibitory neurons, i.e., where $w > 0$ is replaced by $w < 0$?
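Before checking the answer below, here is a quick numerical probe (a sketch using the helpers defined above; the choice $w=-1.0$ is just an example):
```
# With inhibitory recurrence (w < 0), how many fixed points survive?
pars = default_pars_single(w=-1.0, I_ext=0.5)
x_fps = my_fp_finder(pars, [0, .4, .9])
print(np.unique(np.round(x_fps, 4)))  # all guesses collapse onto one low-activity fixed point
```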
###Code
# to_remove explanation
"""
You can check this by going back to Interactive Demo 2.1 and setting the
weight to w < 0. You will notice that the system has only one fixed point, and that
it sits close to zero. For these dynamics, the system will eventually converge
to that low-activity state. But try it out!
""";
###Output
_____no_output_____
###Markdown
--- Summary*Estimated timing of tutorial: 1 hour, 25 minutes* In this tutorial, we have investigated the dynamics of a rate-based single population of neurons. We learned about:- The effect of the input parameters and the time constant of the network on the dynamics of the population.- How to find the fixed point(s) of the system. We build on these concepts in the bonus material - check it out if you have time. You will learn:- How to determine the stability of a fixed point by linearizing the system.- How to add realistic inputs to our model. --- Bonus --- Bonus Section 1: Stability analysis via linearization of the dynamics
###Code
# @title Video 3: Stability of fixed points
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1oA411e7eg", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="KKMlWWU83Jg", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Here we will dive into the math of how to figure out the stability of a fixed point. Just like in our equation for the feedforward network, a generic linear system $\frac{dx}{dt} = \lambda (x - b)$ has a fixed point for $x=b$. The analytical solution of such a system can be found to be:$$x(t) = b + \big{(} x(0) - b \big{)} \text{e}^{\lambda t}.$$Now consider a small perturbation of the activity around the fixed point: $x(0) = b + \epsilon$, where $|\epsilon| \ll 1$. Will the perturbation $\epsilon(t)$ grow with time or will it decay to the fixed point? The evolution of the perturbation with time can be written, using the analytical solution for $x(t)$, as:$$\epsilon (t) = x(t) - b = \epsilon \text{e}^{\lambda t}$$- if $\lambda < 0$, $\epsilon(t)$ decays to zero, $x(t)$ will still converge to $b$, and the fixed point is "**stable**".- if $\lambda > 0$, $\epsilon(t)$ grows with time, $x(t)$ will leave the fixed point $b$ exponentially, and the fixed point is, therefore, "**unstable**". Similar to what we did in the linear system above, in order to determine the stability of a fixed point $r^{*}$ of the excitatory population dynamics, we perturb Equation (3) around $r^{*}$ by $\epsilon$, i.e. $r = r^{*} + \epsilon$. We can plug this into Equation (3) and obtain the equation determining the time evolution of the perturbation $\epsilon(t)$:\begin{align}\tau \frac{d\epsilon}{dt} \approx -\epsilon + w F'(w\cdot r^{*} + I_{\text{ext}};a,\theta) \epsilon \end{align}where $F'(\cdot)$ is the derivative of the transfer function $F(\cdot)$. We can rewrite the above equation as:\begin{align}\frac{d\epsilon}{dt} \approx \frac{\epsilon}{\tau}[-1 + w F'(w\cdot r^* + I_{\text{ext}};a,\theta)] \end{align}That is, as in the linear system above, the value of $$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau \qquad (5)$$determines whether the perturbation will grow or decay to zero, i.e., $\lambda$ defines the stability of the fixed point. This value is called the **eigenvalue** of the dynamical system. The derivative of the sigmoid transfer function is:\begin{align} \frac{dF}{dx} & = \frac{d}{dx} (1+\exp\{-a(x-\theta)\})^{-1} \\& = a\exp\{-a(x-\theta)\} (1+\exp\{-a(x-\theta)\})^{-2}. \qquad (6)\end{align}We provide a helper function `dF` which computes this derivative.
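As a simple consistency check, in the feedforward case $w=0$ Equation (5) reduces to $\lambda = -1/\tau < 0$: the single fixed point $r^* = F(I_{\text{ext}};a,\theta)$ is always stable, which matches the convergent dynamics we simulated in Section 1.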
###Code
# @markdown Execute this cell to enable helper function `dF` and visualize derivative
def dF(x, a, theta):
"""
Population activation function.
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
dFdx : the derivative of the population activation function at x
"""
# Calculate the derivative of the population activation function
dFdx = a * np.exp(-a * (x - theta)) * (1 + np.exp(-a * (x - theta)))**-2
return dFdx
# Set parameters
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Compute derivative of transfer function
df = dF(x, pars['a'], pars['theta'])
# Visualize
plot_dFdt(x, df)
###Output
_____no_output_____
###Markdown
Bonus Coding Exercise 1: Compute eigenvalues As discussed above, for the case with $w=5.0$ and $I_{\text{ext}}=0.5$, the system displays **three** fixed points. However, when we simulated the dynamics and varied the initial conditions $r_{\rm init}$, we could only obtain **two** steady states. In this exercise, we will now check the stability of each of the three fixed points by calculating the corresponding eigenvalues with the function `eig_single`. Check the sign of each eigenvalue (i.e., stability of each fixed point). How many of the fixed points are stable? Recall the expression for the eigenvalue at a fixed point $r^*$ (Equation (5)):$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau$$
###Code
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigenvalue of the linearized system
"""
#####################################################################
## TODO for students: compute eigenvalue and disable the error
raise NotImplementedError("Student exercise: compute the eigenvalue")
######################################################################
# Compute the eigenvalue
eig = ...
return eig
# Find the eigenvalues for all fixed points
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
for i, fp in enumerate(x_fp):
  eig_fp = eig_single(fp, **pars)
  print(f'Fixed point{i + 1} at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
# to_remove solution
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigenvalue of the linearized system
"""
# Compute the eigenvalue
eig = (-1. + w * dF(w * fp + I_ext, a, theta)) / tau
return eig
# Find the eigenvalues for all fixed points
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
for i, fp in enumerate(x_fp):
  eig_fp = eig_single(fp, **pars)
  print(f'Fixed point{i + 1} at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
###Output
_____no_output_____
###Markdown
**SAMPLE OUTPUT**
```
Fixed point1 at 0.042 with Eigenvalue=-0.583
Fixed point2 at 0.447 with Eigenvalue=0.498
Fixed point3 at 0.900 with Eigenvalue=-0.626
```
We can see that the first and third fixed points are stable (negative eigenvalues) and the second is unstable (positive eigenvalue) - as we expected! --- Bonus Section 2: Noisy input drives the transition between two stable states As discussed in several previous tutorials, the Ornstein-Uhlenbeck (OU) process is usually used to generate a noisy input into the neuron. The OU input $\eta(t)$ follows: $$\tau_\eta \frac{d}{dt}\eta(t) = -\eta(t) + \sigma_\eta\sqrt{2\tau_\eta}\,\xi(t)$$ Execute the following function `my_OU(pars, sig, myseed=False)` to generate an OU process.
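The helper discretizes this equation with an Euler-Maruyama step; the update below mirrors the line in the code, with $\xi[k] \sim \mathcal{N}(0,1)$:$$I_{\text{OU}}[k+1] = I_{\text{OU}}[k] + \frac{\Delta t}{\tau_\eta}\big(0 - I_{\text{OU}}[k]\big) + \sigma_\eta\sqrt{\frac{2\Delta t}{\tau_\eta}}\,\xi[k+1]$$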
###Code
# @markdown Execute to get helper function `my_OU` and visualize OU process
def my_OU(pars, sig, myseed=False):
"""
A function that generates an Ornstein-Uhlenbeck process
Args:
pars : parameter dictionary
sig : noise amplitude
myseed : random seed. int or boolean
Returns:
I : Ornstein-Uhlenbeck input current
"""
# Retrieve simulation parameters
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
tau_ou = pars['tau_ou'] # [ms]
# set random seed
if myseed:
np.random.seed(seed=myseed)
else:
np.random.seed()
# Initialize
noise = np.random.randn(Lt)
I_ou = np.zeros(Lt)
I_ou[0] = noise[0] * sig
# generate OU
for it in range(Lt - 1):
I_ou[it + 1] = (I_ou[it]
+ dt / tau_ou * (0. - I_ou[it])
+ np.sqrt(2 * dt / tau_ou) * sig * noise[it + 1])
return I_ou
pars = default_pars_single(T=100)
pars['tau_ou'] = 1. # [ms]
sig_ou = 0.1
I_ou = my_OU(pars, sig=sig_ou, myseed=2020)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], I_ou, 'r')
plt.xlabel('t (ms)')
plt.ylabel(r'$I_{\mathrm{OU}}$')
plt.show()
###Output
_____no_output_____
###Markdown
In the presence of two or more fixed points, noisy inputs can drive a transition between the fixed points! Here, we stimulate an E population for 1,000 ms while applying OU inputs.
###Code
# @markdown Execute this cell to simulate E population with OU inputs
pars = default_pars_single(T=1000)
pars['w'] = 5.0
sig_ou = 0.7
pars['tau_ou'] = 1. # [ms]
pars['I_ext'] = 0.56 + my_OU(pars, sig=sig_ou, myseed=2020)
r = simulate_single(pars)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], r, 'b', alpha=0.8)
plt.xlabel('t (ms)')
plt.ylabel(r'$r(t)$')
plt.show()
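# A rough occupancy estimate (hypothetical threshold at r = 0.5, roughly the
# unstable middle fixed point found earlier): fraction of time spent high
high = r > 0.5
print(f'Fraction of time in the high-activity state: {high.mean():.2f}')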
###Output
_____no_output_____
###Markdown
Tutorial 1: Neural Rate Models**Week 2, Day 4: Dynamic Networks****By Neuromatch Academy**__Content creators:__ Qinglong Gu, Songtin Li, Arvind Kumar, John Murray, Julijana Gjorgjieva __Content reviewers:__ Maryam Vaziri-Pashkam, Ella Batty, Lorenzo Fontolan, Richard Gao, Spiros Chavlis, Michael Waskom --- Tutorial Objectives The brain is a complex system, not because it is composed of a large number of diverse types of neurons, but mainly because of how neurons are connected to each other. The brain is indeed a network of highly specialized neuronal networks. The activity of a neural network constantly evolves in time. For this reason, neurons can be modeled as dynamical systems. The dynamical system approach is only one of the many modeling approaches that computational neuroscientists have developed (other points of view include information processing, statistical models, etc.). How the dynamics of neuronal networks affect the representation and processing of information in the brain is an open question. However, signatures of altered brain dynamics present in many brain diseases (e.g., in epilepsy or Parkinson's disease) tell us that it is crucial to study network activity dynamics if we want to understand the brain. In this tutorial, we will simulate and study one of the simplest models of biological neuronal networks. Instead of modeling and simulating individual excitatory neurons (e.g., LIF models that you implemented yesterday), we will treat them as a single homogeneous population and approximate their dynamics using a single one-dimensional equation describing the evolution of their average spiking rate in time. In this tutorial, we will learn how to build a firing rate model of a single population of excitatory neurons. **Steps:**- Write the equation for the firing rate dynamics of a 1D excitatory population.- Visualize the response of the population as a function of parameters such as threshold level and gain, using the frequency-current (F-I) curve.- Numerically simulate the dynamics of the excitatory population and find the fixed points of the system. - Investigate the stability of the fixed points by linearizing the dynamics around them. --- Setup
###Code
# Imports
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt # root-finding algorithm
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Helper functions
def plot_fI(x, f):
plt.figure(figsize=(6, 4)) # plot the figure
plt.plot(x, f, 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
def plot_dr_r(r, drdt, x_fps=None):
plt.figure()
plt.plot(r, drdt, 'k')
plt.plot(r, 0. * r, 'k--')
if x_fps is not None:
plt.plot(x_fps, np.zeros_like(x_fps), "ko", ms=12)
plt.xlabel(r'$r$')
plt.ylabel(r'$\frac{dr}{dt}$', fontsize=20)
plt.ylim(-0.1, 0.1)
def plot_dFdt(x, dFdt):
plt.figure()
plt.plot(x, dFdt, 'r')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('dF(x)', fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
--- Section 1: Neuronal network dynamics
###Code
# @title Video 1: Dynamic networks
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="p848349hPyw", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
###Output
_____no_output_____
###Markdown
Section 1.1: Dynamics of a single excitatory population Individual neurons respond by spiking. When we average the spikes of neurons in a population, we can define the average firing activity of the population. In this model, we are interested in how the population-averaged firing varies as a function of time and network parameters. Mathematically, we can describe the firing rate dynamics as:\begin{align}\tau \frac{dr}{dt} &= -r + F(w\cdot r + I_{\text{ext}}) \quad\qquad (1)\end{align}$r(t)$ represents the average firing rate of the excitatory population at time $t$, $\tau$ controls the timescale of the evolution of the average firing rate, $w$ denotes the strength (synaptic weight) of the recurrent input to the population, $I_{\text{ext}}$ represents the external input, and the transfer function $F(\cdot)$ (which can be related to the f-I curve of individual neurons described in the next sections) represents the population activation function in response to all received inputs. To start building the model, please execute the cell below to initialize the simulation parameters.
###Code
# @markdown *Execute this cell to set default parameters for a single excitatory population model*
def default_pars_single(**kwargs):
pars = {}
# Excitatory parameters
pars['tau'] = 1. # Timescale of the E population [ms]
pars['a'] = 1.2 # Gain of the E population
pars['theta'] = 2.8 # Threshold of the E population
# Connection strength
pars['w'] = 0. # E to E, we first set it to 0
# External input
pars['I_ext'] = 0.
# simulation parameters
pars['T'] = 20. # Total duration of simulation [ms]
pars['dt'] = .1 # Simulation time step [ms]
pars['r_init'] = 0.2 # Initial value of E
# External parameters if any
pars.update(kwargs)
# Vector of discretized time points [ms]
pars['range_t'] = np.arange(0, pars['T'], pars['dt'])
return pars
###Output
_____no_output_____
###Markdown
You can now use:- `pars = default_pars_single()` to get all the parameters, and then you can execute `print(pars)` to check these parameters. - `pars = default_pars_single(T=T_sim, dt=time_step)` to set new simulation time and time step- To update an existing parameter dictionary, use `pars['New_para'] = value`. Because `pars` is a dictionary, it can be passed to a function that requires individual parameters as arguments using `my_func(**pars)` syntax. Section 1.2: F-I curves In electrophysiology, a neuron is often characterized by its spike rate output in response to input currents. This is often called the **F-I** curve, denoting the output spike frequency (**F**) in response to different injected currents (**I**). We estimated this for an LIF neuron in yesterday's tutorial. The transfer function $F(\cdot)$ in Equation $1$ represents the gain of the population as a function of the total input. The gain is often modeled as a sigmoidal function, i.e., more input drive leads to a nonlinear increase in the population firing rate. The output firing rate will eventually saturate for high input values. A sigmoidal $F(\cdot)$ is parameterized by its gain $a$ and threshold $\theta$.$$ F(x;a,\theta) = \frac{1}{1+\text{e}^{-a(x-\theta)}} - \frac{1}{1+\text{e}^{a\theta}} \quad(2)$$The argument $x$ represents the input to the population. Note that the second term is chosen so that $F(0;a,\theta)=0$. Many other transfer functions (generally monotonic) can also be used. Examples are the rectified linear function $ReLU(x)$ or the hyperbolic tangent $tanh(x)$. Exercise 1: Implement F-I curve Let's first investigate the activation functions before simulating the dynamics of the entire population. In this exercise, you will implement a sigmoidal **F-I** curve or transfer function $F(x)$, with gain $a$ and threshold level $\theta$ as parameters.
###Code
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
#################################################
## TODO for students: compute f = F(x) ##
# Fill out function and remove
raise NotImplementedError("Student exercise: implement the f-I function")
#################################################
# Define the sigmoidal transfer function f = F(x)
f = ...
return f
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
# f = F(x, pars['a'], pars['theta'])
# plot_fI(x, f)
# to_remove solution
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
# Define the sigmoidal transfer function f = F(x)
f = (1 + np.exp(-a * (x - theta)))**-1 - (1 + np.exp(a * theta))**-1
return f
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
f = F(x, pars['a'], pars['theta'])
with plt.xkcd():
plot_fI(x, f)
###Output
_____no_output_____
###Markdown
Interactive Demo: Parameter exploration of F-I curve Here's an interactive demo that shows how the F-I curve changes for different values of the gain and threshold parameters. How do the gain and threshold parameters affect the F-I curve?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def interactive_plot_FI(a, theta):
"""
Population activation function.
Args:
a : the gain of the function
theta : the threshold of the function
Returns:
plot of the F-I curve with the given parameters
"""
# set the range of input
x = np.arange(0, 10, .1)
plt.figure()
plt.plot(x, F(x, a, theta), 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
_ = widgets.interact(interactive_plot_FI, a=(0.3, 3, 0.3), theta=(2, 4, 0.2))
# to_remove explanation
"""
Discussion:
For the function we have chosen to model the F-I curve (eq 2),
- a determines the slope (gain) of the rising phase of the F-I curve
- theta determines the input at which the function F(x) reaches its mid-value (0.5).
That is, theta shifts the F-I curve along the horizontal axis.
For our neurons we are using in this tutorial:
- a controls the gain of the neuron population
- theta controls the threshold at which the neuron population starts to respond
""";
###Output
_____no_output_____
###Markdown
Section 1.3: Simulation scheme of E dynamics Because $F(\cdot)$ is a nonlinear function, the exact solution of Equation $1$ cannot be determined via analytical methods. Therefore, numerical methods must be used to find the solution. In practice, the derivative on the left-hand side of Equation $1$ can be approximated using the Euler method on a time-grid of stepsize $\Delta t$:\begin{align}&\frac{dr}{dt} \approx \frac{r[k+1]-r[k]}{\Delta t} \end{align}where $r[k] = r(k\Delta t)$. Thus, $$\Delta r[k] = \frac{\Delta t}{\tau}[-r[k] + F(w\cdot r[k] + I_{\text{ext}}[k];a,\theta)]$$ Hence, Equation (1) is updated at each time step by:$$r[k+1] = r[k] + \Delta r[k]$$
###Code
# @markdown *Execute this cell to enable the single population rate model simulator: `simulate_single`*
def simulate_single(pars):
"""
Simulate an excitatory population of neurons
Args:
pars : Parameter dictionary
Returns:
rE : Activity of excitatory population (array)
Example:
pars = default_pars_single()
r = simulate_single(pars)
"""
# Set parameters
tau, a, theta = pars['tau'], pars['a'], pars['theta']
w = pars['w']
I_ext = pars['I_ext']
r_init = pars['r_init']
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
# Initialize activity
r = np.zeros(Lt)
r[0] = r_init
I_ext = I_ext * np.ones(Lt)
# Update the E activity
for k in range(Lt - 1):
dr = dt / tau * (-r[k] + F(w * r[k] + I_ext[k], a, theta))
r[k+1] = r[k] + dr
return r
help(simulate_single)
###Output
_____no_output_____
###Markdown
Interactive Demo: Parameter Exploration of single population dynamics Note that $w=0$, as in the default setting, means no recurrent input to the neuron population in Equation (1). Hence, the dynamics are entirely determined by the external input $I_{\text{ext}}$. Explore these dynamics in this interactive demo. How does $r_{\text{sim}}(t)$ change with different $I_{\text{ext}}$ values? How does it change with different $\tau$ values? Investigate the relationship between $F(I_{\text{ext}}; a, \theta)$ and the steady value of $r(t)$. Note that $r_{\rm ana}(t)$ denotes the analytical solution - you will learn how this is computed in the next section.
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
# get default parameters
pars = default_pars_single(T=20.)
def Myplot_E_diffI_difftau(I_ext, tau):
# set external input and time constant
pars['I_ext'] = I_ext
pars['tau'] = tau
# simulation
r = simulate_single(pars)
# Analytical Solution
r_ana = (pars['r_init']
+ (F(I_ext, pars['a'], pars['theta'])
- pars['r_init']) * (1. - np.exp(-pars['range_t'] / pars['tau'])))
# plot
plt.figure()
plt.plot(pars['range_t'], r, 'b', label=r'$r_{\mathrm{sim}}$(t)', alpha=0.5,
zorder=1)
plt.plot(pars['range_t'], r_ana, 'b--', lw=5, dashes=(2, 2),
label=r'$r_{\mathrm{ana}}$(t)', zorder=2)
plt.plot(pars['range_t'],
F(I_ext, pars['a'], pars['theta']) * np.ones(pars['range_t'].size),
'k--', label=r'$F(I_{\mathrm{ext}})$')
plt.xlabel('t (ms)', fontsize=16.)
plt.ylabel('Activity r(t)', fontsize=16.)
plt.legend(loc='best', fontsize=14.)
plt.show()
_ = widgets.interact(Myplot_E_diffI_difftau, I_ext=(0.0, 10., 1.),
tau=(1., 5., 0.2))
# to_remove explanation
"""
Discussion:
Given the choice of F-I curve (eq 2) and dynamics of the neuron population (eq. 1)
the neurons have two fixed points or steady-state responses irrespective of the input.
- Weak inputs to the neurons eventually result in the activity converging to zero
- Strong inputs to the neurons eventually result in the activity converging to max value
The time constant tau does not affect the steady-state response, but it determines
the time the neurons take to reach their fixed point.
""";
###Output
_____no_output_____
###Markdown
Think! Above, we have numerically solved a system driven by a positive input. Yet, $r_E(t)$ either decays to zero or reaches a fixed non-zero value.- Why doesn't the solution of the system "explode" in a finite time? In other words, what guarantees that $r_E(t)$ stays finite? - Which parameter would you change in order to increase the maximum value of the response?
###Code
# to_remove explanation
"""
Discussion:
1) As the F-I curve is bounded between zero and one, the system doesn't explode.
The bounded F-I curve guarantees this property.
2) One way to increase the maximum response is to change the f-I curve. For
example, the ReLU is an unbounded function, and thus will increase the overall maximal
response of the network.
""";
###Output
_____no_output_____
###Markdown
--- Section 2: Fixed points of the single population system
###Code
# @title Video 2: Fixed point
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Ox3ELd1UFyo", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
###Output
_____no_output_____
###Markdown
As you varied the two parameters in the last Interactive Demo, you noticed that, while at first the system output quickly changes, with time it reaches its maximum/minimum value and does not change anymore. The value eventually reached by the system is called the **steady state** of the system, or the **fixed point**. Essentially, in the steady states the derivative with respect to time of the activity ($r$) is zero, i.e. $\displaystyle \frac{dr}{dt}=0$. We can find the steady state of Equation (1) by setting $\displaystyle{\frac{dr}{dt}=0}$ and solving for $r$:$$-r_{\text{steady}} + F(w\cdot r_{\text{steady}} + I_{\text{ext}};a,\theta) = 0, \qquad (3)$$When it exists, the solution of Equation (3) defines a **fixed point** of the dynamical system in Equation (1). Note that if $F(x)$ is nonlinear, it is not always possible to find an analytical solution, but the solution can be found via numerical simulations, as we will do later. From the Interactive Demo, one could also notice that the value of $\tau$ influences how quickly the activity will converge to the steady state from its initial value. In the specific case of $w=0$, we can also analytically compute the solution of Equation (1) (i.e., the thick blue dashed line) and deduce the role of $\tau$ in determining the convergence to the fixed point: $$\displaystyle{r(t) = \big{[}F(I_{\text{ext}};a,\theta) -r(t=0)\big{]} (1-\text{e}^{-\frac{t}{\tau}})} + r(t=0)$$ We can now numerically calculate the fixed point with a root finding algorithm. Exercise 2: Visualization of the fixed points When it is not possible to find the solution for Equation (3) analytically, a graphical approach can be taken. To that end, it is useful to plot $\displaystyle{\frac{dr}{dt}}$ as a function of $r$. The values of $r$ for which the plotted function crosses zero on the y axis correspond to fixed points. Here, let us, for example, set $w=5.0$ and $I_{\text{ext}}=0.5$. From Equation (1), you can obtain $$\frac{dr}{dt} = [-r + F(w\cdot r + I_{\text{ext}})]\,/\,\tau $$Then, plot $dr/dt$ as a function of $r$, and check for the presence of fixed points.
###Code
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
#########################################################################
# TODO compute drdt and disable the error
raise NotImplementedError("Finish the compute_drdt function")
#########################################################################
# Calculate drdt
drdt = ...
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Uncomment to test your function
# drdt = compute_drdt(r, **pars)
# plot_dr_r(r, drdt)
# to_remove solution
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
# Calculate drdt
drdt = (-r + F(w * r + I_ext, a, theta)) / tau
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
with plt.xkcd():
plot_dr_r(r, drdt)
###Output
_____no_output_____
###Markdown
Exercise 3: Fixed point calculation We will now find the fixed points numerically. To do so, we need to specify initial values ($r_{\text{guess}}$) for the root-finding algorithm to start from. From the line $\displaystyle{\frac{dr}{dt}}$ plotted above in Exercise 2, initial values can be chosen as a set of values close to where the line crosses zero on the y axis (i.e., near the real fixed points). The next cell defines three helper functions that we will use:- `my_fp_single(r_guess, **pars)` uses a root-finding algorithm to locate a fixed point near a given initial value- `check_fp_single(x_fp, **pars)` verifies that the values of $r_{\rm fp}$ for which $\displaystyle{\frac{dr}{dt}} = 0$ are the true fixed points- `my_fp_finder(r_guess_vector, **pars)` accepts an array of initial values and finds the same number of fixed points, using the above two functions
###Code
# @markdown *Execute this cell to enable the fixed point functions*
def my_fp_single(r_guess, a, theta, w, I_ext, **other_pars):
"""
Calculate the fixed point through drE/dt=0
Args:
r_guess : Initial value used for scipy.optimize function
a, theta, w, I_ext : simulation parameters
Returns:
x_fp : value of fixed point
"""
# define the right hand of E dynamics
def my_WCr(x):
r = x
drdt = (-r + F(w * r + I_ext, a, theta))
y = np.array(drdt)
return y
x0 = np.array(r_guess)
x_fp = opt.root(my_WCr, x0).x.item()
return x_fp
def check_fp_single(x_fp, a, theta, w, I_ext, mytol=1e-4, **other_pars):
"""
Verify |dr/dt| < mytol
Args:
x_fp : value of fixed point
a, theta, w, I_ext: simulation parameters
mytol : tolerance, default as 10^{-4}
Returns :
Whether it is a correct fixed point: True/False
"""
# calculate Equation(3)
y = x_fp - F(w * x_fp + I_ext, a, theta)
# Here we set tolerance as 10^{-4}
return np.abs(y) < mytol
def my_fp_finder(pars, r_guess_vector, mytol=1e-4):
"""
Calculate the fixed point(s) through drE/dt=0
Args:
pars : Parameter dictionary
r_guess_vector : Initial values used for scipy.optimize function
mytol : tolerance for checking fixed point, default as 10^{-4}
Returns:
x_fps : values of fixed points
"""
x_fps = []
for r_guess in r_guess_vector:
x_fp = my_fp_single(r_guess, **pars)
if check_fp_single(x_fp, **pars, mytol=mytol):
x_fps.append(x_fp)
return x_fps
help(my_fp_finder)
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
#############################################################################
# TODO for students:
# Define initial values close to the intersections of drdt and y=0
# (How many initial values? Hint: How many times do the two lines intersect?)
# Calculate the fixed point with these initial values and plot them
#############################################################################
r_guess_vector = [...]
# Uncomment to test your values
# x_fps = my_fp_finder(pars, r_guess_vector)
# plot_dr_r(r, drdt, x_fps)
# to_remove solution
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
r_guess_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_guess_vector)
with plt.xkcd():
plot_dr_r(r, drdt, x_fps)
###Output
_____no_output_____
###Markdown
Interactive Demo: Fixed points as a function of recurrent and external inputs. You can now explore how the previous plot changes when the recurrent coupling $w$ and the external input $I_{\text{ext}}$ take different values. How does the number of fixed points change?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def plot_intersection_single(w, I_ext):
# set your parameters
pars = default_pars_single(w=w, I_ext=I_ext)
# find fixed points
r_init_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_init_vector)
# plot
r = np.linspace(0, 1., 1000)
drdt = (-r + F(w * r + I_ext, pars['a'], pars['theta'])) / pars['tau']
plot_dr_r(r, drdt, x_fps)
_ = widgets.interact(plot_intersection_single, w=(1, 7, 0.2),
I_ext=(0, 3, 0.1))
# to_remove explanation
"""
Discussion:
The fixed points of the single excitatory neuron population are determined by both
recurrent connections w and external input I_ext. In a previous interactive demo
we saw how the system showed two different steady states when w = 0. But when w
does not equal 0, for some range of w the system shows three fixed points (the middle
one being unstable), and the steady state reached depends on the initial condition (i.e.,
r at time zero).
More on this will be explained in the next section.
""";
###Output
_____no_output_____
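###Markdown
To make the dependence on $w$ concrete in code, we can count the fixed points returned by `my_fp_finder` while sweeping the recurrent weight at a fixed external input. This is a rough sketch, assuming `default_pars_single` and `my_fp_finder` from above (and that the three guesses cover all intersections); the jump from one to three fixed points marks the qualitative change you should see in the widget.
###Code
# Count fixed points as a function of the recurrent weight w
import numpy as np

for w in np.arange(1.0, 7.1, 0.5):
  pars = default_pars_single(w=w, I_ext=0.5)
  # merge near-duplicate roots reached from different initial guesses
  fps = np.unique(np.round(my_fp_finder(pars, [0, .4, .9]), 3))
  print(f'w = {w:.1f}: {len(fps)} fixed point(s) at {fps}')
###Output
_____no_output_____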
###Markdown
--- SummaryIn this tutorial, we have investigated the dynamics of a rate-based single population of neurons.We learned about:- The effect of the input parameters and the time constant of the network on the dynamics of the population.- How to find the fixed point(s) of the system.Next, we have two bonus sections covering important concepts in dynamical system analysis and simulation. If you have time left, watch the next video and work through the exercises. You will learn:- How to determine the stability of a fixed point by linearizing the system.- How to add realistic inputs to our model. --- Bonus 1: Stability of a fixed point
###Code
# @title Video 3: Stability of fixed points
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="KKMlWWU83Jg", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
###Output
_____no_output_____
###Markdown
Initial values and trajectoriesHere, let us first set $w=5.0$ and $I_{\text{ext}}=0.5$, and investigate the dynamics of $r(t)$ starting with different initial values $r(0) \equiv r_{\text{init}}$. We will plot the trajectories of $r(t)$ with $r_{\text{init}} = 0.0, 0.1, 0.2,..., 0.9$.
###Code
# @markdown Execute this cell to see the trajectories!
pars = default_pars_single()
pars['w'] = 5.0
pars['I_ext'] = 0.5
plt.figure(figsize=(8, 5))
for ie in range(10):
pars['r_init'] = 0.1 * ie # set the initial value
r = simulate_single(pars) # run the simulation
# plot the activity with given initial
plt.plot(pars['range_t'], r, 'b', alpha=0.1 + 0.1 * ie,
label=r'r$_{\mathrm{init}}$=%.1f' % (0.1 * ie))
plt.xlabel('t (ms)')
plt.title('Two steady states?')
plt.ylabel(r'$r$(t)')
plt.legend(loc=[1.01, -0.06], fontsize=14)
plt.show()
###Output
_____no_output_____
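###Markdown
We can make the "two steady states" impression quantitative by reading off the final value of each trajectory. A small sketch, reusing `simulate_single` and the parameters of the cell above; the ten trajectories should settle onto (roughly) two distinct values of $r$.
###Code
# Read off the value each trajectory settles to
import numpy as np

pars = default_pars_single(w=5.0, I_ext=0.5)
final_values = []
for ie in range(10):
  pars['r_init'] = 0.1 * ie
  final_values.append(simulate_single(pars)[-1])
print(np.round(final_values, 3))  # approximately two distinct steady states
###Output
_____no_output_____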
###Markdown
Interactive Demo: dynamics as a function of the initial valueLet's now set $r_{\rm init}$ to a value of your choice in this demo. How does the solution change? What do you observe?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
pars = default_pars_single(w=5.0, I_ext=0.5)
def plot_single_diffEinit(r_init):
pars['r_init'] = r_init
r = simulate_single(pars)
plt.figure()
plt.plot(pars['range_t'], r, 'b', zorder=1)
plt.plot(0, r[0], 'bo', alpha=0.7, zorder=2)
plt.xlabel('t (ms)', fontsize=16)
plt.ylabel(r'$r(t)$', fontsize=16)
plt.ylim(0, 1.0)
plt.show()
_ = widgets.interact(plot_single_diffEinit, r_init=(0, 1, 0.02))
# to_remove explanation
"""
Discussion:
To better appreciate what is happening here, you should go back to the previous
interactive demo. Set w = 5 and I_ext = 0.5.
You will find that there are three fixed points of the system for these values of
w and I_ext. Now, choose the initial value in this demo and see in which direction
the system output moves. When r_init is in the vicinity of the leftmost fixed point,
it moves towards the leftmost fixed point. When r_init is in the vicinity of the
rightmost fixed point, it moves towards the rightmost fixed point.
""";
###Output
_____no_output_____
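###Markdown
The discussion above can be checked directly: start the simulation just below and just above the middle fixed point and see where the activity ends up. A minimal sketch, assuming `my_fp_finder` and `simulate_single` from above; the middle fixed point (near 0.45 for these parameters) acts as the boundary between the two basins of attraction.
###Code
# Perturb the initial condition around the middle fixed point
pars = default_pars_single(w=5.0, I_ext=0.5)
r_mid = my_fp_finder(pars, [0.4])[0]  # middle fixed point, ~0.447
for eps in (-0.01, 0.01):
  pars['r_init'] = r_mid + eps
  r = simulate_single(pars)
  print(f'r_init = {r_mid + eps:.3f} -> r(T) = {r[-1]:.3f}')
###Output
_____no_output_____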
###Markdown
Stability analysis via linearization of the dynamicsJust like Equation (1) in the case $w=0$ discussed above, a generic linear system $$\frac{dx}{dt} = \lambda (x - b),$$ has a fixed point for $x=b$. The analytical solution of such a system is:$$x(t) = b + \big{(} x(0) - b \big{)} \text{e}^{\lambda t}.$$ Now consider a small perturbation of the activity around the fixed point: $x(0) = b + \epsilon$, where $|\epsilon| \ll 1$. Will the perturbation $\epsilon(t)$ grow with time or will it decay to the fixed point? Using the analytical solution for $x(t)$, the evolution of the perturbation with time can be written as: $$\epsilon (t) = x(t) - b = \epsilon \text{e}^{\lambda t}$$- if $\lambda < 0$, $\epsilon(t)$ decays to zero, $x(t)$ still converges to $b$, and the fixed point is "**stable**".- if $\lambda > 0$, $\epsilon(t)$ grows with time, $x(t)$ leaves the fixed point $b$ exponentially, and the fixed point is, therefore, "**unstable**". Compute the stability of Equation (1)Similar to what we did in the linear system above, in order to determine the stability of a fixed point $r^{*}$ of the excitatory population dynamics, we perturb Equation (1) around $r^{*}$ by $\epsilon$, i.e., $r = r^{*} + \epsilon$. Plugging this into Equation (1), we obtain the equation determining the time evolution of the perturbation $\epsilon(t)$:\begin{align}\tau \frac{d\epsilon}{dt} \approx -\epsilon + w F'(w\cdot r^{*} + I_{\text{ext}};a,\theta) \epsilon \end{align}where $F'(\cdot)$ is the derivative of the transfer function $F(\cdot)$. We can rewrite the above equation as:\begin{align}\frac{d\epsilon}{dt} \approx \frac{\epsilon}{\tau }[-1 + w F'(w\cdot r^* + I_{\text{ext}};a,\theta)] \end{align}That is, as in the linear system above, the value of$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau \qquad (4)$$determines whether the perturbation will grow or decay to zero, i.e., $\lambda$ defines the stability of the fixed point. This value is called the **eigenvalue** of the dynamical system. Exercise 4: Compute $dF$The derivative of the sigmoid transfer function is:\begin{align} \frac{dF}{dx} & = \frac{d}{dx} (1+\exp\{-a(x-\theta)\})^{-1} \\& = a\exp\{-a(x-\theta)\} (1+\exp\{-a(x-\theta)\})^{-2}. \qquad (5)\end{align}Let's now implement the derivative $\displaystyle{\frac{dF}{dx}}$ in the following cell and plot it.
###Code
def dF(x, a, theta):
"""
Derivative of the population activation function.
Args:
  x : the population input
  a : the gain of the function
  theta : the threshold of the function
Returns:
  dFdx : the derivative of the population activation at input x
"""
###########################################################################
# TODO for students: compute dFdx ##
raise NotImplementedError("Student exercise: compute the derivative of F")
###########################################################################
# Calculate the derivative of the population activation function
dFdx = ...
return dFdx
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
# df = dF(x, pars['a'], pars['theta'])
# plot_dFdt(x, df)
# to_remove solution
def dF(x, a, theta):
"""
Derivative of the population activation function.
Args:
  x : the population input
  a : the gain of the function
  theta : the threshold of the function
Returns:
  dFdx : the derivative of the population activation at input x
"""
# Calculate the derivative of the population activation function
dFdx = a * np.exp(-a * (x - theta)) * (1 + np.exp(-a * (x - theta)))**-2
return dFdx
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
df = dF(x, pars['a'], pars['theta'])
with plt.xkcd():
plot_dFdt(x, df)
###Output
_____no_output_____
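###Markdown
A quick way to gain confidence in Equation (5) is to compare `dF` against a centered finite difference of `F`. A minimal sketch, assuming both functions from above are defined; the maximum discrepancy should be close to zero (limited only by the step size).
###Code
# Compare the analytical derivative dF with a numerical one
import numpy as np

pars = default_pars_single()
x = np.arange(0, 10, .1)
h = 1e-5
dF_numeric = (F(x + h, pars['a'], pars['theta'])
              - F(x - h, pars['a'], pars['theta'])) / (2 * h)
print(np.max(np.abs(dF_numeric - dF(x, pars['a'], pars['theta']))))
###Output
_____no_output_____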
###Markdown
Exercise 5: Compute eigenvaluesAs discussed above, for the case with $w=5.0$ and $I_{\text{ext}}=0.5$, the system displays **three** fixed points. However, when we simulated the dynamics and varied the initial conditions $r_{\rm init}$, we could only obtain **two** steady states. In this exercise, we will now check the stability of each of the three fixed points by calculating the corresponding eigenvalues with the function `eig_single`. Check the sign of each eigenvalue (i.e., the stability of each fixed point). How many of the fixed points are stable?Recall that the eigenvalue at a fixed point $r^*$ is given by Equation (4):$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau$$
###Code
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigenvalue of the linearized system
"""
#####################################################################
## TODO for students: compute eigenvalue and disable the error
raise NotImplementedError("Student exercise: compute the eigenvalue")
######################################################################
# Compute the eigenvalue
eig = ...
return eig
# Find the eigenvalues for all fixed points of Exercise 2
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
# Uncomment below lines after completing the eig_single function.
# for i, fp in enumerate(x_fp):
#   eig_fp = eig_single(fp, **pars)
#   print(f'Fixed point{i+1} at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
###Output
_____no_output_____
###Markdown
**SAMPLE OUTPUT**```
Fixed point1 at 0.042 with Eigenvalue=-0.583
Fixed point2 at 0.447 with Eigenvalue=0.498
Fixed point3 at 0.900 with Eigenvalue=-0.626
```
###Code
# to_remove solution
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigevalue of the linearized system
"""
# Compute the eigenvalue
eig = (-1. + w * dF(w * fp + I_ext, a, theta)) / tau
return eig
# Find the eigenvalues for all fixed points of Exercise 2
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
for i, fp in enumerate(x_fp):
  eig_fp = eig_single(fp, **pars)
  print(f'Fixed point{i+1} at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
###Output
_____no_output_____
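###Markdown
The eigenvalue in Equation (4) is simply the slope of $dr/dt$ at the fixed point, so `eig_single` can be cross-checked numerically. A minimal sketch, assuming `compute_drdt`, `my_fp_finder`, and `eig_single` from above; negative slopes correspond to stable fixed points.
###Code
# The eigenvalue equals the slope of dr/dt at the fixed point
pars = default_pars_single(w=5, I_ext=.5)
h = 1e-5
for fp in my_fp_finder(pars, [0, .4, .9]):
  slope = (compute_drdt(fp + h, **pars) - compute_drdt(fp - h, **pars)) / (2 * h)
  print(f'fp = {fp:.3f}: eig_single = {eig_single(fp, **pars):.3f}, '
        f'numerical slope = {slope:.3f}')
###Output
_____no_output_____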
###Markdown
Think! Throughout the tutorial, we have assumed $w > 0$, i.e., we considered a single population of **excitatory** neurons. What do you think will be the behavior of a population of inhibitory neurons, i.e., where $w > 0$ is replaced by $w < 0$?
###Code
# to_remove explanation
"""
Discussion:
You can check this by going back to the second-to-last interactive demo and setting the
weight to w < 0. You will notice that the system has only one fixed point and that
it lies at (or very near) zero. For these dynamics, the system will eventually converge
to zero. But try it out.
""";
###Output
_____no_output_____
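###Markdown
Rather than only using the widget, the inhibitory case can be checked in code. A minimal sketch, assuming `my_fp_finder` and `eig_single` from above; for $w < 0$ we expect a single, stable fixed point at (or very near) zero.
###Code
# Fixed points and stability for an inhibitory population (w < 0)
import numpy as np

pars = default_pars_single(w=-5.0, I_ext=0.5)
fps = np.unique(np.round(my_fp_finder(pars, [0, .4, .9]), 3))
for fp in fps:
  print(f'fixed point at {fp:.3f}, eigenvalue = {eig_single(fp, **pars):.3f}')
###Output
_____no_output_____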
###Markdown
--- Bonus 2: Noisy input drives the transition between two stable states Ornstein-Uhlenbeck (OU) processAs discussed in several previous tutorials, the OU process is usually used to generate a noisy input into the neuron. The OU input $\eta(t)$ follows: $$\tau_\eta \frac{d}{dt}\eta(t) = -\eta (t) + \sigma_\eta\sqrt{2\tau_\eta}\xi(t)$$Execute the following function `my_OU(pars, sig, myseed=False)` to generate an OU process.
###Code
# @title OU process `my_OU(pars, sig, myseed=False)`
# @markdown Make sure you execute this cell to visualize the noise!
def my_OU(pars, sig, myseed=False):
"""
A function that generates an Ornstein-Uhlenbeck process
Args:
  pars : parameter dictionary
  sig : noise amplitude
  myseed : random seed. int or boolean
Returns:
  I_ou : Ornstein-Uhlenbeck input current
"""
# Retrieve simulation parameters
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
tau_ou = pars['tau_ou'] # [ms]
# set random seed
if myseed:
np.random.seed(seed=myseed)
else:
np.random.seed()
# Initialize
noise = np.random.randn(Lt)
I_ou = np.zeros(Lt)
I_ou[0] = noise[0] * sig
# generate OU
for it in range(Lt - 1):
I_ou[it + 1] = (I_ou[it]
+ dt / tau_ou * (0. - I_ou[it])
+ np.sqrt(2 * dt / tau_ou) * sig * noise[it + 1])
return I_ou
pars = default_pars_single(T=100)
pars['tau_ou'] = 1. # [ms]
sig_ou = 0.1
I_ou = my_OU(pars, sig=sig_ou, myseed=2020)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], I_ou, 'r')
plt.xlabel('t (ms)')
plt.ylabel(r'$I_{\mathrm{OU}}$')
plt.show()
###Output
_____no_output_____
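###Markdown
For the discretized OU process above, the stationary standard deviation of $\eta(t)$ should be close to `sig`, with fluctuations correlated over the timescale `tau_ou`. A minimal sketch checking the first property on a longer sample, assuming `my_OU` and `default_pars_single` from above:
###Code
# Check that the sample std of the OU input approaches sig
pars = default_pars_single(T=10000)  # long sample for better statistics
pars['tau_ou'] = 1.  # [ms]
I_ou = my_OU(pars, sig=0.1, myseed=2020)
print(f'sample std = {I_ou.std():.3f} (target sig = 0.1)')
###Output
_____no_output_____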
###Markdown
Example: Up-Down transitionIn the presence of two or more fixed points, noisy inputs can drive a transition between the fixed points! Here, we stimulate an E population for 1,000 ms by applying OU inputs.
###Code
# @title Simulation of an E population with OU inputs
# @markdown Make sure you execute this cell to spot the Up-Down states!
pars = default_pars_single(T=1000)
pars['w'] = 5.0
sig_ou = 0.7
pars['tau_ou'] = 1. # [ms]
pars['I_ext'] = 0.56 + my_OU(pars, sig=sig_ou, myseed=2020)
r = simulate_single(pars)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], r, 'b', alpha=0.8)
plt.xlabel('t (ms)')
plt.ylabel(r'$r(t)$')
plt.show()
###Output
_____no_output_____
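###Markdown
A simple way to quantify the Up-Down dynamics is to threshold $r(t)$ between the two stable states and count how often the population switches. A minimal sketch, reusing `r` from the simulation cell above; the threshold of 0.5 is an ad hoc choice between the low and high fixed points.
###Code
# Fraction of time in the Up state and number of transitions
import numpy as np

up = r > 0.5  # boolean Up/Down label for each time step
frac_up = up.mean()
n_transitions = np.sum(up[1:] != up[:-1])
print(f'fraction of time Up: {frac_up:.2f}, transitions: {n_transitions}')
###Output
_____no_output_____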
###Markdown
[](https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D4_DynamicNetworks/W2D4_Tutorial1.ipynb) Tutorial 1: Neural Rate Models**Week 2, Day 4: Dynamic Networks****By Neuromatch Academy**__Content creators:__ Qinglong Gu, Songtin Li, Arvind Kumar, John Murray, Julijana Gjorgjieva __Content reviewers:__ Maryam Vaziri-Pashkam, Ella Batty, Lorenzo Fontolan, Richard Gao, Spiros Chavlis, Michael Waskom **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesThe brain is a complex system, not because it is composed of a large number of diverse types of neurons, but mainly because of how neurons are connected to each other. The brain is indeed a network of highly specialized neuronal networks. The activity of a neural network constantly evolves in time. For this reason, neurons can be modeled as dynamical systems. The dynamical system approach is only one of the many modeling approaches that computational neuroscientists have developed (other points of view include information processing, statistical models, etc.). How the dynamics of neuronal networks affect the representation and processing of information in the brain is an open question. However, signatures of altered brain dynamics present in many brain diseases (e.g., in epilepsy or Parkinson's disease) tell us that it is crucial to study network activity dynamics if we want to understand the brain.In this tutorial, we will simulate and study one of the simplest models of biological neuronal networks. Instead of modeling and simulating individual excitatory neurons (e.g., LIF models that you implemented yesterday), we will treat them as a single homogeneous population and approximate their dynamics using a single one-dimensional equation describing the evolution of their average spiking rate in time.In this tutorial, we will learn how to build a firing rate model of a single population of excitatory neurons. **Steps:**- Write the equation for the firing rate dynamics of a 1D excitatory population.- Visualize the response of the population as a function of parameters such as threshold level and gain, using the frequency-current (F-I) curve.- Numerically simulate the dynamics of the excitatory population and find the fixed points of the system. - Investigate the stability of the fixed points by linearizing the dynamics around them. --- Setup
###Code
# Imports
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt # root-finding algorithm
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Helper functions
def plot_fI(x, f):
plt.figure(figsize=(6, 4)) # plot the figure
plt.plot(x, f, 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
def plot_dr_r(r, drdt, x_fps=None):
plt.figure()
plt.plot(r, drdt, 'k')
plt.plot(r, 0. * r, 'k--')
if x_fps is not None:
plt.plot(x_fps, np.zeros_like(x_fps), "ko", ms=12)
plt.xlabel(r'$r$')
plt.ylabel(r'$\frac{dr}{dt}$', fontsize=20)
plt.ylim(-0.1, 0.1)
def plot_dFdt(x, dFdt):
plt.figure()
plt.plot(x, dFdt, 'r')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('dF(x)', fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
--- Section 1: Neuronal network dynamics
###Code
# @title Video 1: Dynamic networks
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="p848349hPyw", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Section 1.1: Dynamics of a single excitatory populationIndividual neurons respond by spiking. When we average the spikes of neurons in a population, we can define the average firing activity of the population. In this model, we are interested in how the population-averaged firing varies as a function of time and network parameters. Mathematically, we can describe the firing rate dynamic as:\begin{align}\tau \frac{dr}{dt} &= -r + F(w\cdot r + I_{\text{ext}}) \quad\qquad (1)\end{align}$r(t)$ represents the average firing rate of the excitatory population at time $t$, $\tau$ controls the timescale of the evolution of the average firing rate, $w$ denotes the strength (synaptic weight) of the recurrent input to the population, $I_{\text{ext}}$ represents the external input, and the transfer function $F(\cdot)$ (which can be related to f-I curve of individual neurons described in the next sections) represents the population activation function in response to all received inputs.To start building the model, please execute the cell below to initialize the simulation parameters.
###Code
# @markdown *Execute this cell to set default parameters for a single excitatory population model*
def default_pars_single(**kwargs):
pars = {}
# Excitatory parameters
pars['tau'] = 1. # Timescale of the E population [ms]
pars['a'] = 1.2 # Gain of the E population
pars['theta'] = 2.8 # Threshold of the E population
# Connection strength
pars['w'] = 0. # E to E, we first set it to 0
# External input
pars['I_ext'] = 0.
# simulation parameters
pars['T'] = 20. # Total duration of simulation [ms]
pars['dt'] = .1 # Simulation time step [ms]
pars['r_init'] = 0.2 # Initial value of E
# External parameters if any
pars.update(kwargs)
# Vector of discretized time points [ms]
pars['range_t'] = np.arange(0, pars['T'], pars['dt'])
return pars
###Output
_____no_output_____
###Markdown
You can now use:- `pars = default_pars_single()` to get all the parameters, and then you can execute `print(pars)` to check these parameters. - `pars = default_pars_single(T=T_sim, dt=time_step)` to set new simulation time and time step- To update an existing parameter dictionary, use `pars['New_para'] = value`Because `pars` is a dictionary, it can be passed to a function that requires individual parameters as arguments using `my_func(**pars)` syntax. Section 1.2: F-I curvesIn electrophysiology, a neuron is often characterized by its spike rate output in response to input currents. This is often called the **F-I** curve, denoting the output spike frequency (**F**) in response to different injected currents (**I**). We estimated this for an LIF neuron in yesterday's tutorial.The transfer function $F(\cdot)$ in Equation $1$ represents the gain of the population as a function of the total input. The gain is often modeled as a sigmoidal function, i.e., more input drive leads to a nonlinear increase in the population firing rate. The output firing rate will eventually saturate for high input values. A sigmoidal $F(\cdot)$ is parameterized by its gain $a$ and threshold $\theta$.$$ F(x;a,\theta) = \frac{1}{1+\text{e}^{-a(x-\theta)}} - \frac{1}{1+\text{e}^{a\theta}} \quad(2)$$The argument $x$ represents the input to the population. Note that the second term is chosen so that $F(0;a,\theta)=0$.Many other transfer functions (generally monotonic) can be also used. Examples are the rectified linear function $ReLU(x)$ or the hyperbolic tangent $tanh(x)$. Exercise 1: Implement F-I curve Let's first investigate the activation functions before simulating the dynamics of the entire population. In this exercise, you will implement a sigmoidal **F-I** curve or transfer function $F(x)$, with gain $a$ and threshold level $\theta$ as parameters.
###Code
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
#################################################
## TODO for students: compute f = F(x) ##
# Fill out function and remove
raise NotImplementedError("Student excercise: implement the f-I function")
#################################################
# Define the sigmoidal transfer function f = F(x)
f = ...
return f
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
# f = F(x, pars['a'], pars['theta'])
# plot_fI(x, f)
# to_remove solution
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
# Define the sigmoidal transfer function f = F(x)
f = (1 + np.exp(-a * (x - theta)))**-1 - (1 + np.exp(a * theta))**-1
return f
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
f = F(x, pars['a'], pars['theta'])
with plt.xkcd():
plot_fI(x, f)
###Output
_____no_output_____
###Markdown
Interactive Demo: Parameter exploration of F-I curveHere's an interactive demo that shows how the F-I curve changes for different values of the gain and threshold parameters. How do the gain and threshold parameters affect the F-I curve?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def interactive_plot_FI(a, theta):
"""
Population activation function.
Expecxts:
a : the gain of the function
theta : the threshold of the function
Returns:
plot the F-I curve with give parameters
"""
# set the range of input
x = np.arange(0, 10, .1)
plt.figure()
plt.plot(x, F(x, a, theta), 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
_ = widgets.interact(interactive_plot_FI, a=(0.3, 3, 0.3), theta=(2, 4, 0.2))
# to_remove explanation
"""
Discussion:
For the function we have chosen to model the F-I curve (eq 2),
- a determines the slope (gain) of the rising phase of the F-I curve
- theta determines the input at which the function F(x) reaches its mid-value (0.5).
That is, theta shifts the F-I curve along the horizontal axis.
For our neurons we are using in this tutorial:
- a controls the gain of the neuron population
- theta controls the threshold at which the neuron population starts to respond
""";
###Output
_____no_output_____
###Markdown
Section 1.3: Simulation scheme of E dynamicsBecause $F(\cdot)$ is a nonlinear function, the exact solution of Equation $1$ can not be determined via analytical methods. Therefore, numerical methods must be used to find the solution. In practice, the derivative on the left-hand side of Equation $1$ can be approximated using the Euler method on a time-grid of stepsize $\Delta t$:\begin{align}&\frac{dr}{dt} \approx \frac{r[k+1]-r[k]}{\Delta t} \end{align}where $r[k] = r(k\Delta t)$. Thus,$$\Delta r[k] = \frac{\Delta t}{\tau}[-r[k] + F(w\cdot r[k] + I_{\text{ext}}[k];a,\theta)]$$Hence, Equation (1) is updated at each time step by:$$r[k+1] = r[k] + \Delta r[k]$$
###Code
# @markdown *Execute this cell to enable the single population rate model simulator: `simulate_single`*
def simulate_single(pars):
"""
Simulate an excitatory population of neurons
Args:
pars : Parameter dictionary
Returns:
rE : Activity of excitatory population (array)
Example:
pars = default_pars_single()
r = simulate_single(pars)
"""
# Set parameters
tau, a, theta = pars['tau'], pars['a'], pars['theta']
w = pars['w']
I_ext = pars['I_ext']
r_init = pars['r_init']
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
# Initialize activity
r = np.zeros(Lt)
r[0] = r_init
I_ext = I_ext * np.ones(Lt)
# Update the E activity
for k in range(Lt - 1):
dr = dt / tau * (-r[k] + F(w * r[k] + I_ext[k], a, theta))
r[k+1] = r[k] + dr
return r
help(simulate_single)
###Output
_____no_output_____
###Markdown
Interactive Demo: Parameter Exploration of single population dynamicsNote that $w=0$, as in the default setting, means no recurrent input to the neuron population in Equation (1). Hence, the dynamics are entirely determined by the external input $I_{\text{ext}}$. Explore these dynamics in this interactive demo.How does $r_{\text{sim}}(t)$ change with different $I_{\text{ext}}$ values? How does it change with different $\tau$ values? Investigate the relationship between $F(I_{\text{ext}}; a, \theta)$ and the steady value of $r(t)$. Note that, $r_{\rm ana}(t)$ denotes the analytical solution - you will learn how this is computed in the next section.
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
# get default parameters
pars = default_pars_single(T=20.)
def Myplot_E_diffI_difftau(I_ext, tau):
# set external input and time constant
pars['I_ext'] = I_ext
pars['tau'] = tau
# simulation
r = simulate_single(pars)
# Analytical Solution
r_ana = (pars['r_init']
+ (F(I_ext, pars['a'], pars['theta'])
- pars['r_init']) * (1. - np.exp(-pars['range_t'] / pars['tau'])))
# plot
plt.figure()
plt.plot(pars['range_t'], r, 'b', label=r'$r_{\mathrm{sim}}$(t)', alpha=0.5,
zorder=1)
plt.plot(pars['range_t'], r_ana, 'b--', lw=5, dashes=(2, 2),
label=r'$r_{\mathrm{ana}}$(t)', zorder=2)
plt.plot(pars['range_t'],
F(I_ext, pars['a'], pars['theta']) * np.ones(pars['range_t'].size),
'k--', label=r'$F(I_{\mathrm{ext}})$')
plt.xlabel('t (ms)', fontsize=16.)
plt.ylabel('Activity r(t)', fontsize=16.)
plt.legend(loc='best', fontsize=14.)
plt.show()
_ = widgets.interact(Myplot_E_diffI_difftau, I_ext=(0.0, 10., 1.),
tau=(1., 5., 0.2))
# to_remove explanation
"""
Discussion:
Given the choice of F-I curve (eq 2) and dynamics of the neuron population (eq. 1)
the neurons have two fixed points or steady-state responses irrespective of the input.
- Weak inputs to the neurons eventually result in the activity converging to zero
- Strong inputs to the neurons eventually result in the activity converging to max value
The time constant tau, does not affect the steady-state response but it determines
the time the neurons take to reach to their fixed point.
""";
###Output
_____no_output_____
###Markdown
Think!Above, we have numerically solved a system driven by a positive input. Yet, $r_E(t)$ either decays to zero or reaches a fixed non-zero value.- Why doesn't the solution of the system "explode" in a finite time? In other words, what guarantees that $r_E$(t) stays finite? - Which parameter would you change in order to increase the maximum value of the response?
###Code
# to_remove explanation
"""
Discussion:
1) As the F-I curve is bounded between zero and one, the system doesn't explode.
The f-curve guarantees this property
2) One way to increase the maximum response is to change the f-I curve. For
example, the ReLU is an unbounded function, and thus will increase the overall maximal
response of the network.
""";
###Output
_____no_output_____
###Markdown
--- Section 2: Fixed points of the single population system
###Code
# @title Video 2: Fixed point
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Ox3ELd1UFyo", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
As you varied the two parameters in the last Interactive Demo, you noticed that, while at first the system output quickly changes, with time, it reaches its maximum/minimum value and does not change anymore. The value eventually reached by the system is called the **steady state** of the system, or the **fixed point**. Essentially, in the steady states the derivative with respect to time of the activity ($r$) is zero, i.e. $\displaystyle \frac{dr}{dt}=0$. We can find that the steady state of the Equation. (1) by setting $\displaystyle{\frac{dr}{dt}=0}$ and solve for $r$:$$-r_{\text{steady}} + F(w\cdot r_{\text{steady}} + I_{\text{ext}};a,\theta) = 0, \qquad (3)$$When it exists, the solution of Equation. (3) defines a **fixed point** of the dynamical system in Equation (1). Note that if $F(x)$ is nonlinear, it is not always possible to find an analytical solution, but the solution can be found via numerical simulations, as we will do later.From the Interactive Demo, one could also notice that the value of $\tau$ influences how quickly the activity will converge to the steady state from its initial value. In the specific case of $w=0$, we can also analytically compute the solution of Equation (1) (i.e., the thick blue dashed line) and deduce the role of $\tau$ in determining the convergence to the fixed point: $$\displaystyle{r(t) = \big{[}F(I_{\text{ext}};a,\theta) -r(t=0)\big{]} (1-\text{e}^{-\frac{t}{\tau}})} + r(t=0)$$ \\We can now numerically calculate the fixed point with a root finding algorithm. Exercise 2: Visualization of the fixed pointsWhen it is not possible to find the solution for Equation (3) analytically, a graphical approach can be taken. To that end, it is useful to plot $\displaystyle{\frac{dr}{dt}}$ as a function of $r$. The values of $r$ for which the plotted function crosses zero on the y axis correspond to fixed points. Here, let us, for example, set $w=5.0$ and $I^{\text{ext}}=0.5$. From Equation (1), you can obtain$$\frac{dr}{dt} = [-r + F(w\cdot r + I^{\text{ext}})]\,/\,\tau $$Then, plot the $dr/dt$ as a function of $r$, and check for the presence of fixed points.
###Code
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
#########################################################################
# TODO compute drdt and disable the error
raise NotImplementedError("Finish the compute_drdt function")
#########################################################################
# Calculate drdt
drdt = ...
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Uncomment to test your function
# drdt = compute_drdt(r, **pars)
# plot_dr_r(r, drdt)
# to_remove solution
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
# Calculate drdt
drdt = (-r + F(w * r + I_ext, a, theta)) / tau
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
with plt.xkcd():
plot_dr_r(r, drdt)
###Output
_____no_output_____
###Markdown
Exercise 3: Fixed point calculationWe will now find the fixed points numerically. To do so, we need to specif initial values ($r_{\text{guess}}$) for the root-finding algorithm to start from. From the line $\displaystyle{\frac{dr}{dt}}$ plotted above in Exercise 2, initial values can be chosen as a set of values close to where the line crosses zero on the y axis (real fixed point).The next cell defines three helper functions that we will use:- `my_fp_single(r_guess, **pars)` uses a root-finding algorithm to locate a fixed point near a given initial value- `check_fp_single(x_fp, **pars)`, verifies that the values of $r_{\rm fp}$ for which $\displaystyle{\frac{dr}{dt}} = 0$ are the true fixed points- `my_fp_finder(r_guess_vector, **pars)` accepts an array of initial values and finds the same number of fixed points, using the above two functions
###Code
# @markdown *Execute this cell to enable the fixed point functions*
def my_fp_single(r_guess, a, theta, w, I_ext, **other_pars):
"""
Calculate the fixed point through drE/dt=0
Args:
r_guess : Initial value used for scipy.optimize function
a, theta, w, I_ext : simulation parameters
Returns:
x_fp : value of fixed point
"""
# define the right hand of E dynamics
def my_WCr(x):
r = x
drdt = (-r + F(w * r + I_ext, a, theta))
y = np.array(drdt)
return y
x0 = np.array(r_guess)
x_fp = opt.root(my_WCr, x0).x.item()
return x_fp
def check_fp_single(x_fp, a, theta, w, I_ext, mytol=1e-4, **other_pars):
"""
Verify |dr/dt| < mytol
Args:
fp : value of fixed point
a, theta, w, I_ext: simulation parameters
mytol : tolerance, default as 10^{-4}
Returns :
Whether it is a correct fixed point: True/False
"""
# calculate Equation(3)
y = x_fp - F(w * x_fp + I_ext, a, theta)
# Here we set tolerance as 10^{-4}
return np.abs(y) < mytol
def my_fp_finder(pars, r_guess_vector, mytol=1e-4):
"""
Calculate the fixed point(s) through drE/dt=0
Args:
pars : Parameter dictionary
r_guess_vector : Initial values used for scipy.optimize function
mytol : tolerance for checking fixed point, default as 10^{-4}
Returns:
x_fps : values of fixed points
"""
x_fps = []
correct_fps = []
for r_guess in r_guess_vector:
x_fp = my_fp_single(r_guess, **pars)
if check_fp_single(x_fp, **pars, mytol=mytol):
x_fps.append(x_fp)
return x_fps
help(my_fp_finder)
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
#############################################################################
# TODO for students:
# Define initial values close to the intersections of drdt and y=0
# (How many initial values? Hint: How many times do the two lines intersect?)
# Calculate the fixed point with these initial values and plot them
#############################################################################
r_guess_vector = [...]
# Uncomment to test your values
# x_fps = my_fp_finder(pars, r_guess_vector)
# plot_dr_r(r, drdt, x_fps)
# to_remove solution
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
r_guess_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_guess_vector)
with plt.xkcd():
plot_dr_r(r, drdt, x_fps)
###Output
_____no_output_____
###Markdown
Interactive Demo: fixed points as a function of recurrent and external inputs.You can now explore how the previous plot changes when the recurrent coupling $w$ and the external input $I_{\text{ext}}$ take different values. How does the number of fixed points change?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def plot_intersection_single(w, I_ext):
# set your parameters
pars = default_pars_single(w=w, I_ext=I_ext)
# find fixed points
r_init_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_init_vector)
# plot
r = np.linspace(0, 1., 1000)
drdt = (-r + F(w * r + I_ext, pars['a'], pars['theta'])) / pars['tau']
plot_dr_r(r, drdt, x_fps)
_ = widgets.interact(plot_intersection_single, w=(1, 7, 0.2),
I_ext=(0, 3, 0.1))
# to_remove explanation
"""
Discussion:
The fixed points of the single excitatory neuron population are determined by both
recurrent connections w and external input I_ext. In a previous interactive demo
we saw how the system showed two different steady-states when w = 0. But when w
doe not equal 0, for some range of w the system shows three fixed points (the middle
one being unstable) and the steady state depends on the initial conditions (i.e.
r at time zero.).
More on this will be explained in the next section.
""";
###Output
_____no_output_____
###Markdown
--- SummaryIn this tutorial, we have investigated the dynamics of a rate-based single population of neurons.We learned about:- The effect of the input parameters and the time constant of the network on the dynamics of the population.- How to find the fixed point(s) of the system.Next, we have two Bonus, but important concepts in dynamical system analysis and simulation. If you have time left, watch the next video and proceed to solve the exercises. You will learn:- How to determine the stability of a fixed point by linearizing the system.- How to add realistic inputs to our model. --- Bonus 1: Stability of a fixed point
###Code
# @title Video 3: Stability of fixed points
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="KKMlWWU83Jg", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Initial values and trajectoriesHere, let us first set $w=5.0$ and $I_{\text{ext}}=0.5$, and investigate the dynamics of $r(t)$ starting with different initial values $r(0) \equiv r_{\text{init}}$. We will plot the trajectories of $r(t)$ with $r_{\text{init}} = 0.0, 0.1, 0.2,..., 0.9$.
###Code
# @markdown Execute this cell to see the trajectories!
pars = default_pars_single()
pars['w'] = 5.0
pars['I_ext'] = 0.5
plt.figure(figsize=(8, 5))
for ie in range(10):
pars['r_init'] = 0.1 * ie # set the initial value
r = simulate_single(pars) # run the simulation
# plot the activity with given initial
plt.plot(pars['range_t'], r, 'b', alpha=0.1 + 0.1 * ie,
label=r'r$_{\mathrm{init}}$=%.1f' % (0.1 * ie))
plt.xlabel('t (ms)')
plt.title('Two steady states?')
plt.ylabel(r'$r$(t)')
plt.legend(loc=[1.01, -0.06], fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
Interactive Demo: dynamics as a function of the initial valueLet's now set $r_{\rm init}$ to a value of your choice in this demo. How does the solution change? What do you observe?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
pars = default_pars_single(w=5.0, I_ext=0.5)
def plot_single_diffEinit(r_init):
pars['r_init'] = r_init
r = simulate_single(pars)
plt.figure()
plt.plot(pars['range_t'], r, 'b', zorder=1)
plt.plot(0, r[0], 'bo', alpha=0.7, zorder=2)
plt.xlabel('t (ms)', fontsize=16)
plt.ylabel(r'$r(t)$', fontsize=16)
plt.ylim(0, 1.0)
plt.show()
_ = widgets.interact(plot_single_diffEinit, r_init=(0, 1, 0.02))
# to_remove explanation
"""
Discussion:
To better appreciate what is happening here, you should go back to the previous
interactive demo. Set the w = 5 and I_ext = 0.5.
You will find that there are three fixed points of the system for these values of
w and I_ext. Now, choose the initial value in this demo and see in which direction
the system output moves. When r_init is in the vicinity of the leftmost fixed points
it moves towards the left most fixed point. When r_init is in the vicinity of the
rightmost fixed points it moves towards the rightmost fixed point.
""";
###Output
_____no_output_____
###Markdown
Stability analysis via linearization of the dynamicsJust like Equation $1$ in the case ($w=0$) discussed above, a generic linear system $$\frac{dx}{dt} = \lambda (x - b),$$ has a fixed point for $x=b$. The analytical solution of such a system can be found to be:$$x(t) = b + \big{(} x(0) - b \big{)} \text{e}^{\lambda t}.$$ Now consider a small perturbation of the activity around the fixed point: $x(0) = b+ \epsilon$, where $|\epsilon| \ll 1$. Will the perturbation $\epsilon(t)$ grow with time or will it decay to the fixed point? The evolution of the perturbation with time can be written, using the analytical solution for $x(t)$, as: $$\epsilon (t) = x(t) - b = \epsilon \text{e}^{\lambda t}$$- if $\lambda < 0$, $\epsilon(t)$ decays to zero, $x(t)$ will still converge to $b$ and the fixed point is "**stable**".- if $\lambda > 0$, $\epsilon(t)$ grows with time, $x(t)$ will leave the fixed point $b$ exponentially, and the fixed point is, therefore, "**unstable**" . Compute the stability of Equation $1$Similar to what we did in the linear system above, in order to determine the stability of a fixed point $r^{*}$ of the excitatory population dynamics, we perturb Equation (1) around $r^{*}$ by $\epsilon$, i.e. $r = r^{*} + \epsilon$. We can plug in Equation (1) and obtain the equation determining the time evolution of the perturbation $\epsilon(t)$:\begin{align}\tau \frac{d\epsilon}{dt} \approx -\epsilon + w F'(w\cdot r^{*} + I_{\text{ext}};a,\theta) \epsilon \end{align}where $F'(\cdot)$ is the derivative of the transfer function $F(\cdot)$. We can rewrite the above equation as:\begin{align}\frac{d\epsilon}{dt} \approx \frac{\epsilon}{\tau }[-1 + w F'(w\cdot r^* + I_{\text{ext}};a,\theta)] \end{align}That is, as in the linear system above, the value of$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau \qquad (4)$$determines whether the perturbation will grow or decay to zero, i.e., $\lambda$ defines the stability of the fixed point. This value is called the **eigenvalue** of the dynamical system. Exercise 4: Compute $dF$The derivative of the sigmoid transfer function is:\begin{align} \frac{dF}{dx} & = \frac{d}{dx} (1+\exp\{-a(x-\theta)\})^{-1} \\& = a\exp\{-a(x-\theta)\} (1+\exp\{-a(x-\theta)\})^{-2}. \qquad (5)\end{align}Let's now find the expression for the derivative $\displaystyle{\frac{dF}{dx}}$ in the following cell and plot it.
###Code
def dF(x, a, theta):
"""
Population activation function.
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
dFdx : the population activation response F(x) for input x
"""
###########################################################################
# TODO for students: compute dFdx ##
raise NotImplementedError("Student excercise: compute the deravitive of F")
###########################################################################
# Calculate the population activation
dFdx = ...
return dFdx
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
# df = dF(x, pars['a'], pars['theta'])
# plot_dFdt(x, df)
# to_remove solution
def dF(x, a, theta):
"""
Population activation function.
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
dFdx : the population activation response F(x) for input x
"""
# Calculate the population activation
dFdx = a * np.exp(-a * (x - theta)) * (1 + np.exp(-a * (x - theta)))**-2
return dFdx
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
df = dF(x, pars['a'], pars['theta'])
with plt.xkcd():
plot_dFdt(x, df)
###Output
_____no_output_____
###Markdown
Exercise 5: Compute eigenvaluesAs discussed above, for the case with $w=5.0$ and $I_{\text{ext}}=0.5$, the system displays **three** fixed points. However, when we simulated the dynamics and varied the initial conditions $r_{\rm init}$, we could only obtain **two** steady states. In this exercise, we will now check the stability of each of the three fixed points by calculating the corresponding eigenvalues with the function `eig_single`. Check the sign of each eigenvalue (i.e., stability of each fixed point). How many of the fixed points are stable?Note that the expression of the eigenvalue at fixed point $r^*$$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau$$
###Code
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigevalue of the linearized system
"""
#####################################################################
## TODO for students: compute eigenvalue and disable the error
raise NotImplementedError("Student excercise: compute the eigenvalue")
######################################################################
# Compute the eigenvalue
eig = ...
return eig
# Find the eigenvalues for all fixed points of Exercise 2
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
# Uncomment below lines after completing the eig_single function.
# for fp in x_fp:
# eig_fp = eig_single(fp, **pars)
# print(f'Fixed point1 at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
###Output
_____no_output_____
###Markdown
**SAMPLE OUTPUT**```Fixed point1 at 0.042 with Eigenvalue=-0.583Fixed point2 at 0.447 with Eigenvalue=0.498Fixed point3 at 0.900 with Eigenvalue=-0.626```
###Code
# to_remove solution
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigevalue of the linearized system
"""
# Compute the eigenvalue
eig = (-1. + w * dF(w * fp + I_ext, a, theta)) / tau
return eig
# Find the eigenvalues for all fixed points of Exercise 2
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
for fp in x_fp:
eig_fp = eig_single(fp, **pars)
print(f'Fixed point1 at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
###Output
_____no_output_____
###Markdown
Think! Throughout the tutorial, we have assumed $w> 0 $, i.e., we considered a single population of **excitatory** neurons. What do you think will be the behavior of a population of inhibitory neurons, i.e., where $w> 0$ is replaced by $w< 0$?
###Code
# to_remove explanation
"""
Discussion:
You can check this by going back the second last interactive demo and set the
weight to w<0. You will notice that the system has only one fixed point and that
is at zero value. For this particular dynamics, the system will eventually converge
to zero. But try it out.
""";
###Output
_____no_output_____
###Markdown
--- Bonus 2: Noisy input drives the transition between two stable states Ornstein-Uhlenbeck (OU) processAs discussed in several previous tutorials, the OU process is usually used to generate a noisy input into the neuron. The OU input $\eta(t)$ follows: $$\tau_\eta \frac{d}{dt}\eta(t) = -\eta (t) + \sigma_\eta\sqrt{2\tau_\eta}\xi(t)$$Execute the following function `my_OU(pars, sig, myseed=False)` to generate an OU process.
###Code
# @title OU process `my_OU(pars, sig, myseed=False)`
# @markdown Make sure you execute this cell to visualize the noise!
def my_OU(pars, sig, myseed=False):
"""
A functions that generates Ornstein-Uhlenback process
Args:
pars : parameter dictionary
sig : noise amplitute
myseed : random seed. int or boolean
Returns:
I : Ornstein-Uhlenbeck input current
"""
# Retrieve simulation parameters
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
tau_ou = pars['tau_ou'] # [ms]
# set random seed
if myseed:
np.random.seed(seed=myseed)
else:
np.random.seed()
# Initialize
noise = np.random.randn(Lt)
I_ou = np.zeros(Lt)
I_ou[0] = noise[0] * sig
# generate OU
for it in range(Lt - 1):
I_ou[it + 1] = (I_ou[it]
+ dt / tau_ou * (0. - I_ou[it])
+ np.sqrt(2 * dt / tau_ou) * sig * noise[it + 1])
return I_ou
pars = default_pars_single(T=100)
pars['tau_ou'] = 1. # [ms]
sig_ou = 0.1
I_ou = my_OU(pars, sig=sig_ou, myseed=2020)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], I_ou, 'r')
plt.xlabel('t (ms)')
plt.ylabel(r'$I_{\mathrm{OU}}$')
plt.show()
###Output
_____no_output_____
###Markdown
Example: Up-Down transitionIn the presence of two or more fixed points, noisy inputs can drive a transition between the fixed points! Here, we stimulate an E population for 1,000 ms applying OU inputs.
###Code
# @title Simulation of an E population with OU inputs
# @markdown Make sure you execute this cell to spot the Up-Down states!
pars = default_pars_single(T=1000)
pars['w'] = 5.0
sig_ou = 0.7
pars['tau_ou'] = 1. # [ms]
pars['I_ext'] = 0.56 + my_OU(pars, sig=sig_ou, myseed=2020)
r = simulate_single(pars)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], r, 'b', alpha=0.8)
plt.xlabel('t (ms)')
plt.ylabel(r'$r(t)$')
plt.show()
###Output
_____no_output_____
###Markdown
Tutorial 1: Neural Rate Models**Week 2, Day 4: Dynamic Networks****By Neuromatch Academy**__Content creators:__ Qinglong Gu, Songtin Li, Arvind Kumar, John Murray, Julijana Gjorgjieva __Content reviewers:__ Maryam Vaziri-Pashkam, Ella Batty, Lorenzo Fontolan, Richard Gao, Spiros Chavlis, Michael Waskom **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesThe brain is a complex system, not because it is composed of a large number of diverse types of neurons, but mainly because of how neurons are connected to each other. The brain is indeed a network of highly specialized neuronal networks. The activity of a neural network constantly evolves in time. For this reason, neurons can be modeled as dynamical systems. The dynamical system approach is only one of the many modeling approaches that computational neuroscientists have developed (other points of view include information processing, statistical models, etc.). How the dynamics of neuronal networks affect the representation and processing of information in the brain is an open question. However, signatures of altered brain dynamics present in many brain diseases (e.g., in epilepsy or Parkinson's disease) tell us that it is crucial to study network activity dynamics if we want to understand the brain.In this tutorial, we will simulate and study one of the simplest models of biological neuronal networks. Instead of modeling and simulating individual excitatory neurons (e.g., LIF models that you implemented yesterday), we will treat them as a single homogeneous population and approximate their dynamics using a single one-dimensional equation describing the evolution of their average spiking rate in time.In this tutorial, we will learn how to build a firing rate model of a single population of excitatory neurons. **Steps:**- Write the equation for the firing rate dynamics of a 1D excitatory population.- Visualize the response of the population as a function of parameters such as threshold level and gain, using the frequency-current (F-I) curve.- Numerically simulate the dynamics of the excitatory population and find the fixed points of the system. - Investigate the stability of the fixed points by linearizing the dynamics around them. --- Setup
###Code
# Imports
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt # root-finding algorithm
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Helper functions
def plot_fI(x, f):
plt.figure(figsize=(6, 4)) # plot the figure
plt.plot(x, f, 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
def plot_dr_r(r, drdt, x_fps=None):
plt.figure()
plt.plot(r, drdt, 'k')
plt.plot(r, 0. * r, 'k--')
if x_fps is not None:
plt.plot(x_fps, np.zeros_like(x_fps), "ko", ms=12)
plt.xlabel(r'$r$')
plt.ylabel(r'$\frac{dr}{dt}$', fontsize=20)
plt.ylim(-0.1, 0.1)
def plot_dFdt(x, dFdt):
plt.figure()
plt.plot(x, dFdt, 'r')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('dF(x)', fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
--- Section 1: Neuronal network dynamics
###Code
# @title Video 1: Dynamic networks
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="p848349hPyw", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Section 1.1: Dynamics of a single excitatory populationIndividual neurons respond by spiking. When we average the spikes of neurons in a population, we can define the average firing activity of the population. In this model, we are interested in how the population-averaged firing varies as a function of time and network parameters. Mathematically, we can describe the firing rate dynamic as:\begin{align}\tau \frac{dr}{dt} &= -r + F(w\cdot r + I_{\text{ext}}) \quad\qquad (1)\end{align}$r(t)$ represents the average firing rate of the excitatory population at time $t$, $\tau$ controls the timescale of the evolution of the average firing rate, $w$ denotes the strength (synaptic weight) of the recurrent input to the population, $I_{\text{ext}}$ represents the external input, and the transfer function $F(\cdot)$ (which can be related to f-I curve of individual neurons described in the next sections) represents the population activation function in response to all received inputs.To start building the model, please execute the cell below to initialize the simulation parameters.
###Code
# @markdown *Execute this cell to set default parameters for a single excitatory population model*
def default_pars_single(**kwargs):
pars = {}
# Excitatory parameters
pars['tau'] = 1. # Timescale of the E population [ms]
pars['a'] = 1.2 # Gain of the E population
pars['theta'] = 2.8 # Threshold of the E population
# Connection strength
pars['w'] = 0. # E to E, we first set it to 0
# External input
pars['I_ext'] = 0.
# simulation parameters
pars['T'] = 20. # Total duration of simulation [ms]
pars['dt'] = .1 # Simulation time step [ms]
pars['r_init'] = 0.2 # Initial value of E
# External parameters if any
pars.update(kwargs)
# Vector of discretized time points [ms]
pars['range_t'] = np.arange(0, pars['T'], pars['dt'])
return pars
###Output
_____no_output_____
###Markdown
You can now use:- `pars = default_pars_single()` to get all the parameters, and then you can execute `print(pars)` to check these parameters. - `pars = default_pars_single(T=T_sim, dt=time_step)` to set new simulation time and time step- To update an existing parameter dictionary, use `pars['New_para'] = value`Because `pars` is a dictionary, it can be passed to a function that requires individual parameters as arguments using `my_func(**pars)` syntax. Section 1.2: F-I curvesIn electrophysiology, a neuron is often characterized by its spike rate output in response to input currents. This is often called the **F-I** curve, denoting the output spike frequency (**F**) in response to different injected currents (**I**). We estimated this for an LIF neuron in yesterday's tutorial.The transfer function $F(\cdot)$ in Equation $1$ represents the gain of the population as a function of the total input. The gain is often modeled as a sigmoidal function, i.e., more input drive leads to a nonlinear increase in the population firing rate. The output firing rate will eventually saturate for high input values. A sigmoidal $F(\cdot)$ is parameterized by its gain $a$ and threshold $\theta$.$$ F(x;a,\theta) = \frac{1}{1+\text{e}^{-a(x-\theta)}} - \frac{1}{1+\text{e}^{a\theta}} \quad(2)$$The argument $x$ represents the input to the population. Note that the second term is chosen so that $F(0;a,\theta)=0$.Many other transfer functions (generally monotonic) can be also used. Examples are the rectified linear function $ReLU(x)$ or the hyperbolic tangent $tanh(x)$. Exercise 1: Implement F-I curve Let's first investigate the activation functions before simulating the dynamics of the entire population. In this exercise, you will implement a sigmoidal **F-I** curve or transfer function $F(x)$, with gain $a$ and threshold level $\theta$ as parameters.
###Code
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
#################################################
## TODO for students: compute f = F(x) ##
# Fill out function and remove
raise NotImplementedError("Student excercise: implement the f-I function")
#################################################
# Define the sigmoidal transfer function f = F(x)
f = ...
return f
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
# f = F(x, pars['a'], pars['theta'])
# plot_fI(x, f)
# to_remove solution
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
# Define the sigmoidal transfer function f = F(x)
f = (1 + np.exp(-a * (x - theta)))**-1 - (1 + np.exp(a * theta))**-1
return f
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
f = F(x, pars['a'], pars['theta'])
with plt.xkcd():
plot_fI(x, f)
###Output
_____no_output_____
###Markdown
Interactive Demo: Parameter exploration of F-I curve

Here's an interactive demo that shows how the F-I curve changes for different values of the gain and threshold parameters. How do the gain and threshold parameters affect the F-I curve?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def interactive_plot_FI(a, theta):
"""
Plot the population activation function.
Expects:
a : the gain of the function
theta : the threshold of the function
Returns:
plot of the F-I curve with the given parameters
"""
# set the range of input
x = np.arange(0, 10, .1)
plt.figure()
plt.plot(x, F(x, a, theta), 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
_ = widgets.interact(interactive_plot_FI, a=(0.3, 3, 0.3), theta=(2, 4, 0.2))
# to_remove explanation
"""
Discussion:
For the function we have chosen to model the F-I curve (eq 2),
- a determines the slope (gain) of the rising phase of the F-I curve
- theta determines the input at which the function F(x) reaches its mid-value (0.5).
That is, theta shifts the F-I curve along the horizontal axis.
For the neurons we are using in this tutorial:
- a controls the gain of the neuron population
- theta controls the threshold at which the neuron population starts to respond
""";
###Output
_____no_output_____
###Markdown
Section 1.3: Simulation scheme of E dynamics

Because $F(\cdot)$ is a nonlinear function, the exact solution of Equation $1$ cannot be determined via analytical methods. Therefore, numerical methods must be used to find the solution. In practice, the derivative on the left-hand side of Equation $1$ can be approximated using the Euler method on a time-grid of stepsize $\Delta t$:\begin{align}&\frac{dr}{dt} \approx \frac{r[k+1]-r[k]}{\Delta t} \end{align}where $r[k] = r(k\Delta t)$. Thus,$$\Delta r[k] = \frac{\Delta t}{\tau}[-r[k] + F(w\cdot r[k] + I_{\text{ext}}[k];a,\theta)]$$Hence, Equation (1) is updated at each time step by:$$r[k+1] = r[k] + \Delta r[k]$$
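As a quick worked example, here is a single Euler update step written out explicitly (a minimal sketch, assuming the `F` implementation from Exercise 1 and the default parameters):

```python
# One explicit Euler step of equation (1) (sketch; w = 0 by default)
pars = default_pars_single(I_ext=0.5)
r_k = pars['r_init']
dr = pars['dt'] / pars['tau'] * (-r_k + F(pars['w'] * r_k + pars['I_ext'],
                                          pars['a'], pars['theta']))
print(f"r[0] = {r_k}, r[1] = {r_k + dr:.4f}")
```

The full simulator in the next cell simply repeats this update over the whole time grid.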
###Code
# @markdown *Execute this cell to enable the single population rate model simulator: `simulate_single`*
def simulate_single(pars):
"""
Simulate an excitatory population of neurons
Args:
pars : Parameter dictionary
Returns:
rE : Activity of excitatory population (array)
Example:
pars = default_pars_single()
r = simulate_single(pars)
"""
# Set parameters
tau, a, theta = pars['tau'], pars['a'], pars['theta']
w = pars['w']
I_ext = pars['I_ext']
r_init = pars['r_init']
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
# Initialize activity
r = np.zeros(Lt)
r[0] = r_init
I_ext = I_ext * np.ones(Lt)
# Update the E activity
for k in range(Lt - 1):
dr = dt / tau * (-r[k] + F(w * r[k] + I_ext[k], a, theta))
r[k+1] = r[k] + dr
return r
help(simulate_single)
###Output
_____no_output_____
###Markdown
Interactive Demo: Parameter exploration of single population dynamics

Note that $w=0$, as in the default setting, means no recurrent input to the neuron population in Equation (1). Hence, the dynamics are entirely determined by the external input $I_{\text{ext}}$. Explore these dynamics in this interactive demo. How does $r_{\text{sim}}(t)$ change with different $I_{\text{ext}}$ values? How does it change with different $\tau$ values? Investigate the relationship between $F(I_{\text{ext}}; a, \theta)$ and the steady value of $r(t)$.

Note that $r_{\rm ana}(t)$ denotes the analytical solution - you will learn how this is computed in the next section.
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
# get default parameters
pars = default_pars_single(T=20.)
def Myplot_E_diffI_difftau(I_ext, tau):
# set external input and time constant
pars['I_ext'] = I_ext
pars['tau'] = tau
# simulation
r = simulate_single(pars)
# Analytical Solution
r_ana = (pars['r_init']
+ (F(I_ext, pars['a'], pars['theta'])
- pars['r_init']) * (1. - np.exp(-pars['range_t'] / pars['tau'])))
# plot
plt.figure()
plt.plot(pars['range_t'], r, 'b', label=r'$r_{\mathrm{sim}}$(t)', alpha=0.5,
zorder=1)
plt.plot(pars['range_t'], r_ana, 'b--', lw=5, dashes=(2, 2),
label=r'$r_{\mathrm{ana}}$(t)', zorder=2)
plt.plot(pars['range_t'],
F(I_ext, pars['a'], pars['theta']) * np.ones(pars['range_t'].size),
'k--', label=r'$F(I_{\mathrm{ext}})$')
plt.xlabel('t (ms)', fontsize=16.)
plt.ylabel('Activity r(t)', fontsize=16.)
plt.legend(loc='best', fontsize=14.)
plt.show()
_ = widgets.interact(Myplot_E_diffI_difftau, I_ext=(0.0, 10., 1.),
tau=(1., 5., 0.2))
# to_remove explanation
"""
Discussion:
Given the choice of F-I curve (eq 2) and dynamics of the neuron population (eq. 1)
the neurons have two fixed points or steady-state responses irrespective of the input.
- Weak inputs to the neurons eventually result in the activity converging to zero
- Strong inputs to the neurons eventually result in the activity converging to max value
The time constant tau does not affect the steady-state response, but it determines
the time the neurons take to reach their fixed point.
""";
###Output
_____no_output_____
###Markdown
Think!

Above, we have numerically solved a system driven by a positive input. Yet, $r_E(t)$ either decays to zero or reaches a fixed non-zero value.
- Why doesn't the solution of the system "explode" in a finite time? In other words, what guarantees that $r_E(t)$ stays finite?
- Which parameter would you change in order to increase the maximum value of the response?
###Code
# to_remove explanation
"""
Discussion:
1) As the F-I curve is bounded between zero and one, the system doesn't explode.
The F-I curve guarantees this property.
2) One way to increase the maximum response is to change the F-I curve. For
example, the ReLU is an unbounded function, and thus will increase the overall maximal
response of the network.
""";
###Output
_____no_output_____
###Markdown
--- Section 2: Fixed points of the single population system
###Code
# @title Video 2: Fixed point
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Ox3ELd1UFyo", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
As you varied the two parameters in the last Interactive Demo, you noticed that, while at first the system output quickly changes, with time, it reaches its maximum/minimum value and does not change anymore. The value eventually reached by the system is called the **steady state** of the system, or the **fixed point**. Essentially, in the steady states the derivative of the activity ($r$) with respect to time is zero, i.e., $\displaystyle \frac{dr}{dt}=0$. We can find the steady state of Equation (1) by setting $\displaystyle{\frac{dr}{dt}=0}$ and solving for $r$:$$-r_{\text{steady}} + F(w\cdot r_{\text{steady}} + I_{\text{ext}};a,\theta) = 0, \qquad (3)$$When it exists, the solution of Equation (3) defines a **fixed point** of the dynamical system in Equation (1). Note that if $F(x)$ is nonlinear, it is not always possible to find an analytical solution, but the solution can be found via numerical simulations, as we will do later.

From the Interactive Demo, one could also notice that the value of $\tau$ influences how quickly the activity will converge to the steady state from its initial value. In the specific case of $w=0$, we can also analytically compute the solution of Equation (1) (i.e., the thick blue dashed line) and deduce the role of $\tau$ in determining the convergence to the fixed point: $$\displaystyle{r(t) = \big{[}F(I_{\text{ext}};a,\theta) -r(t=0)\big{]} (1-\text{e}^{-\frac{t}{\tau}})} + r(t=0)$$We can now numerically calculate the fixed point with a root-finding algorithm.

Exercise 2: Visualization of the fixed points

When it is not possible to find the solution of Equation (3) analytically, a graphical approach can be taken. To that end, it is useful to plot $\displaystyle{\frac{dr}{dt}}$ as a function of $r$. The values of $r$ for which the plotted function crosses zero on the y axis correspond to fixed points. Here, let us, for example, set $w=5.0$ and $I^{\text{ext}}=0.5$. From Equation (1), you can obtain$$\frac{dr}{dt} = [-r + F(w\cdot r + I^{\text{ext}})]\,/\,\tau $$Then, plot $dr/dt$ as a function of $r$, and check for the presence of fixed points.
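Before turning to the graphical exercise, you can sanity-check the analytical solution above against the simulator (a sketch, assuming the `F` and `simulate_single` definitions from the earlier cells):

```python
# For w = 0, the simulated trajectory should match the closed-form solution
# up to the Euler discretization error (of order dt)
pars = default_pars_single(w=0., I_ext=3.)
r_sim = simulate_single(pars)
r_ana = (pars['r_init']
         + (F(pars['I_ext'], pars['a'], pars['theta']) - pars['r_init'])
         * (1. - np.exp(-pars['range_t'] / pars['tau'])))
print(f"max |r_sim - r_ana| = {np.max(np.abs(r_sim - r_ana)):.4f}")
```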
###Code
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
#########################################################################
# TODO compute drdt and disable the error
raise NotImplementedError("Finish the compute_drdt function")
#########################################################################
# Calculate drdt
drdt = ...
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Uncomment to test your function
# drdt = compute_drdt(r, **pars)
# plot_dr_r(r, drdt)
# to_remove solution
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
# Calculate drdt
drdt = (-r + F(w * r + I_ext, a, theta)) / tau
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
with plt.xkcd():
plot_dr_r(r, drdt)
###Output
_____no_output_____
###Markdown
Exercise 3: Fixed point calculation

We will now find the fixed points numerically. To do so, we need to specify initial values ($r_{\text{guess}}$) for the root-finding algorithm to start from. From the line $\displaystyle{\frac{dr}{dt}}$ plotted above in Exercise 2, initial values can be chosen as a set of values close to where the line crosses zero on the y axis (real fixed point).

The next cell defines three helper functions that we will use:
- `my_fp_single(r_guess, **pars)` uses a root-finding algorithm to locate a fixed point near a given initial value
- `check_fp_single(x_fp, **pars)` verifies that the values of $r_{\rm fp}$ for which $\displaystyle{\frac{dr}{dt}} = 0$ are the true fixed points
- `my_fp_finder(r_guess_vector, **pars)` accepts an array of initial values and finds the same number of fixed points, using the above two functions
###Code
# @markdown *Execute this cell to enable the fixed point functions*
def my_fp_single(r_guess, a, theta, w, I_ext, **other_pars):
"""
Calculate the fixed point through drE/dt=0
Args:
r_guess : Initial value used for scipy.optimize function
a, theta, w, I_ext : simulation parameters
Returns:
x_fp : value of fixed point
"""
# define the right hand of E dynamics
def my_WCr(x):
r = x
drdt = (-r + F(w * r + I_ext, a, theta))
y = np.array(drdt)
return y
x0 = np.array(r_guess)
x_fp = opt.root(my_WCr, x0).x.item()
return x_fp
def check_fp_single(x_fp, a, theta, w, I_ext, mytol=1e-4, **other_pars):
"""
Verify |dr/dt| < mytol
Args:
x_fp : value of fixed point
a, theta, w, I_ext: simulation parameters
mytol : tolerance, default as 10^{-4}
Returns :
Whether it is a correct fixed point: True/False
"""
# calculate Equation(3)
y = x_fp - F(w * x_fp + I_ext, a, theta)
# Here we set tolerance as 10^{-4}
return np.abs(y) < mytol
def my_fp_finder(pars, r_guess_vector, mytol=1e-4):
"""
Calculate the fixed point(s) through drE/dt=0
Args:
pars : Parameter dictionary
r_guess_vector : Initial values used for scipy.optimize function
mytol : tolerance for checking fixed point, default as 10^{-4}
Returns:
x_fps : values of fixed points
"""
x_fps = []
for r_guess in r_guess_vector:
x_fp = my_fp_single(r_guess, **pars)
if check_fp_single(x_fp, **pars, mytol=mytol):
x_fps.append(x_fp)
return x_fps
help(my_fp_finder)
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
#############################################################################
# TODO for students:
# Define initial values close to the intersections of drdt and y=0
# (How many initial values? Hint: How many times do the two lines intersect?)
# Calculate the fixed point with these initial values and plot them
#############################################################################
r_guess_vector = [...]
# Uncomment to test your values
# x_fps = my_fp_finder(pars, r_guess_vector)
# plot_dr_r(r, drdt, x_fps)
# to_remove solution
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
r_guess_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_guess_vector)
with plt.xkcd():
plot_dr_r(r, drdt, x_fps)
###Output
_____no_output_____
###Markdown
Interactive Demo: Fixed points as a function of recurrent and external inputs

You can now explore how the previous plot changes when the recurrent coupling $w$ and the external input $I_{\text{ext}}$ take different values. How does the number of fixed points change?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def plot_intersection_single(w, I_ext):
# set your parameters
pars = default_pars_single(w=w, I_ext=I_ext)
# find fixed points
r_init_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_init_vector)
# plot
r = np.linspace(0, 1., 1000)
drdt = (-r + F(w * r + I_ext, pars['a'], pars['theta'])) / pars['tau']
plot_dr_r(r, drdt, x_fps)
_ = widgets.interact(plot_intersection_single, w=(1, 7, 0.2),
I_ext=(0, 3, 0.1))
# to_remove explanation
"""
Discussion:
The fixed points of the single excitatory neuron population are determined by both
recurrent connections w and external input I_ext. In a previous interactive demo
we saw how the system showed two different steady-states when w = 0. But when w
does not equal 0, for some range of w the system shows three fixed points (the middle
one being unstable) and the steady state depends on the initial conditions (i.e.,
r at time zero).
More on this will be explained in the next section.
""";
###Output
_____no_output_____
###Markdown
--- Summary

In this tutorial, we have investigated the dynamics of a rate-based single population of neurons. We learned about:
- The effect of the input parameters and the time constant of the network on the dynamics of the population.
- How to find the fixed point(s) of the system.

Next come two bonus, but important, concepts in dynamical system analysis and simulation. If you have time left, watch the next video and proceed to solve the exercises. You will learn:
- How to determine the stability of a fixed point by linearizing the system.
- How to add realistic inputs to our model.

--- Bonus 1: Stability of a fixed point
###Code
# @title Video 3: Stability of fixed points
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="KKMlWWU83Jg", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Initial values and trajectories

Here, let us first set $w=5.0$ and $I_{\text{ext}}=0.5$, and investigate the dynamics of $r(t)$ starting with different initial values $r(0) \equiv r_{\text{init}}$. We will plot the trajectories of $r(t)$ with $r_{\text{init}} = 0.0, 0.1, 0.2, ..., 0.9$.
###Code
# @markdown Execute this cell to see the trajectories!
pars = default_pars_single()
pars['w'] = 5.0
pars['I_ext'] = 0.5
plt.figure(figsize=(8, 5))
for ie in range(10):
pars['r_init'] = 0.1 * ie # set the initial value
r = simulate_single(pars) # run the simulation
# plot the activity with given initial
plt.plot(pars['range_t'], r, 'b', alpha=0.1 + 0.1 * ie,
label=r'r$_{\mathrm{init}}$=%.1f' % (0.1 * ie))
plt.xlabel('t (ms)')
plt.title('Two steady states?')
plt.ylabel(r'$r$(t)')
plt.legend(loc=[1.01, -0.06], fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
Interactive Demo: Dynamics as a function of the initial value

Let's now set $r_{\rm init}$ to a value of your choice in this demo. How does the solution change? What do you observe?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
pars = default_pars_single(w=5.0, I_ext=0.5)
def plot_single_diffEinit(r_init):
pars['r_init'] = r_init
r = simulate_single(pars)
plt.figure()
plt.plot(pars['range_t'], r, 'b', zorder=1)
plt.plot(0, r[0], 'bo', alpha=0.7, zorder=2)
plt.xlabel('t (ms)', fontsize=16)
plt.ylabel(r'$r(t)$', fontsize=16)
plt.ylim(0, 1.0)
plt.show()
_ = widgets.interact(plot_single_diffEinit, r_init=(0, 1, 0.02))
# to_remove explanation
"""
Discussion:
To better appreciate what is happening here, you should go back to the previous
interactive demo. Set the w = 5 and I_ext = 0.5.
You will find that there are three fixed points of the system for these values of
w and I_ext. Now, choose the initial value in this demo and see in which direction
the system output moves. When r_init is in the vicinity of the leftmost fixed point,
it moves towards the leftmost fixed point. When r_init is in the vicinity of the
rightmost fixed point, it moves towards the rightmost fixed point.
""";
###Output
_____no_output_____
###Markdown
Stability analysis via linearization of the dynamics

Just like Equation $1$ in the case ($w=0$) discussed above, a generic linear system $$\frac{dx}{dt} = \lambda (x - b),$$ has a fixed point for $x=b$. The analytical solution of such a system can be found to be:$$x(t) = b + \big{(} x(0) - b \big{)} \text{e}^{\lambda t}.$$ Now consider a small perturbation of the activity around the fixed point: $x(0) = b + \epsilon$, where $|\epsilon| \ll 1$. Will the perturbation $\epsilon(t)$ grow with time or will it decay to the fixed point? The evolution of the perturbation with time can be written, using the analytical solution for $x(t)$, as: $$\epsilon (t) = x(t) - b = \epsilon \text{e}^{\lambda t}$$
- if $\lambda < 0$, $\epsilon(t)$ decays to zero, $x(t)$ will still converge to $b$, and the fixed point is "**stable**".
- if $\lambda > 0$, $\epsilon(t)$ grows with time, $x(t)$ will leave the fixed point $b$ exponentially, and the fixed point is, therefore, "**unstable**".

Compute the stability of Equation $1$

Similar to what we did in the linear system above, in order to determine the stability of a fixed point $r^{*}$ of the excitatory population dynamics, we perturb Equation (1) around $r^{*}$ by $\epsilon$, i.e. $r = r^{*} + \epsilon$. We can plug in Equation (1) and obtain the equation determining the time evolution of the perturbation $\epsilon(t)$:\begin{align}\tau \frac{d\epsilon}{dt} \approx -\epsilon + w F'(w\cdot r^{*} + I_{\text{ext}};a,\theta) \epsilon \end{align}where $F'(\cdot)$ is the derivative of the transfer function $F(\cdot)$. We can rewrite the above equation as:\begin{align}\frac{d\epsilon}{dt} \approx \frac{\epsilon}{\tau }[-1 + w F'(w\cdot r^* + I_{\text{ext}};a,\theta)] \end{align}That is, as in the linear system above, the value of$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau \qquad (4)$$determines whether the perturbation will grow or decay to zero, i.e., $\lambda$ defines the stability of the fixed point. This value is called the **eigenvalue** of the dynamical system.

Exercise 4: Compute $dF$

The derivative of the sigmoid transfer function is:\begin{align} \frac{dF}{dx} & = \frac{d}{dx} (1+\exp\{-a(x-\theta)\})^{-1} \\& = a\exp\{-a(x-\theta)\} (1+\exp\{-a(x-\theta)\})^{-2}. \qquad (5)\end{align}Let's now find the expression for the derivative $\displaystyle{\frac{dF}{dx}}$ in the following cell and plot it.
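Before implementing $dF$, here is a short numerical illustration of the stability criterion for the generic linear system above (a sketch, not part of the exercise):

```python
# Perturbations epsilon(t) = epsilon * e^{lambda t} decay for lambda < 0
# and grow for lambda > 0
b, eps0 = 0.5, 0.01
t = np.arange(0, 5, 0.01)
for lam in (-1., 1.):
    perturbation = eps0 * np.exp(lam * t)
    print(f"lambda = {lam:+.0f}: |epsilon(T)| = {perturbation[-1]:.5f}")
```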
###Code
def dF(x, a, theta):
"""
Derivative of the population activation function.
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
dFdx : the derivative of the population activation function at input x
"""
###########################################################################
# TODO for students: compute dFdx ##
raise NotImplementedError("Student excercise: compute the deravitive of F")
###########################################################################
# Calculate the population activation
dFdx = ...
return dFdx
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
# df = dF(x, pars['a'], pars['theta'])
# plot_dFdt(x, df)
# to_remove solution
def dF(x, a, theta):
"""
Derivative of the population activation function.
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
dFdx : the derivative of the population activation function at input x
"""
# Calculate the population activation
dFdx = a * np.exp(-a * (x - theta)) * (1 + np.exp(-a * (x - theta)))**-2
return dFdx
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
df = dF(x, pars['a'], pars['theta'])
with plt.xkcd():
plot_dFdt(x, df)
###Output
_____no_output_____
###Markdown
Exercise 5: Compute eigenvalues

As discussed above, for the case with $w=5.0$ and $I_{\text{ext}}=0.5$, the system displays **three** fixed points. However, when we simulated the dynamics and varied the initial conditions $r_{\rm init}$, we could only obtain **two** steady states. In this exercise, we will now check the stability of each of the three fixed points by calculating the corresponding eigenvalues with the function `eig_single`. Check the sign of each eigenvalue (i.e., the stability of each fixed point). How many of the fixed points are stable?

Note that the expression for the eigenvalue at fixed point $r^*$ is$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau$$
###Code
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigenvalue of the linearized system
"""
#####################################################################
## TODO for students: compute eigenvalue and disable the error
raise NotImplementedError("Student excercise: compute the eigenvalue")
######################################################################
# Compute the eigenvalue
eig = ...
return eig
# Find the eigenvalues for all fixed points of Exercise 2
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
# Uncomment below lines after completing the eig_single function.
# for i, fp in enumerate(x_fp):
#   eig_fp = eig_single(fp, **pars)
#   print(f'Fixed point{i + 1} at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
###Output
_____no_output_____
###Markdown
**SAMPLE OUTPUT**

```
Fixed point1 at 0.042 with Eigenvalue=-0.583
Fixed point2 at 0.447 with Eigenvalue=0.498
Fixed point3 at 0.900 with Eigenvalue=-0.626
```
###Code
# to_remove solution
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigenvalue of the linearized system
"""
# Compute the eigenvalue
eig = (-1. + w * dF(w * fp + I_ext, a, theta)) / tau
return eig
# Find the eigenvalues for all fixed points of Exercise 2
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
for i, fp in enumerate(x_fp):
eig_fp = eig_single(fp, **pars)
print(f'Fixed point{i + 1} at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
###Output
_____no_output_____
###Markdown
Think!

Throughout the tutorial, we have assumed $w > 0$, i.e., we considered a single population of **excitatory** neurons. What do you think will be the behavior of a population of inhibitory neurons, i.e., where $w > 0$ is replaced by $w < 0$?
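A quick numerical check you can run (a sketch, assuming `my_fp_finder` and `default_pars_single` from the earlier cells):

```python
# With inhibitory recurrence (w < 0), all initial guesses collapse onto a
# single fixed point near zero
pars_inh = default_pars_single(w=-5., I_ext=0.5)
x_fps_inh = my_fp_finder(pars_inh, [0, .4, .9])
print(np.unique(np.round(x_fps_inh, 4)))
```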
###Code
# to_remove explanation
"""
Discussion:
You can check this by going back to the second-to-last interactive demo and setting the
weight to w<0. You will notice that the system has only one fixed point and that
is at zero value. For this particular dynamics, the system will eventually converge
to zero. But try it out.
""";
###Output
_____no_output_____
###Markdown
--- Bonus 2: Noisy input drives the transition between two stable states

Ornstein-Uhlenbeck (OU) process

As discussed in several previous tutorials, the OU process is usually used to generate a noisy input into the neuron. The OU input $\eta(t)$ follows: $$\tau_\eta \frac{d}{dt}\eta(t) = -\eta (t) + \sigma_\eta\sqrt{2\tau_\eta}\xi(t)$$Execute the following function `my_OU(pars, sig, myseed=False)` to generate an OU process.
###Code
# @title OU process `my_OU(pars, sig, myseed=False)`
# @markdown Make sure you execute this cell to visualize the noise!
def my_OU(pars, sig, myseed=False):
"""
A function that generates an Ornstein-Uhlenbeck process
Args:
pars : parameter dictionary
sig : noise amplitude
myseed : random seed. int or boolean
Returns:
I : Ornstein-Uhlenbeck input current
"""
# Retrieve simulation parameters
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
tau_ou = pars['tau_ou'] # [ms]
# set random seed
if myseed:
np.random.seed(seed=myseed)
else:
np.random.seed()
# Initialize
noise = np.random.randn(Lt)
I_ou = np.zeros(Lt)
I_ou[0] = noise[0] * sig
# generate OU
for it in range(Lt - 1):
I_ou[it + 1] = (I_ou[it]
+ dt / tau_ou * (0. - I_ou[it])
+ np.sqrt(2 * dt / tau_ou) * sig * noise[it + 1])
return I_ou
pars = default_pars_single(T=100)
pars['tau_ou'] = 1. # [ms]
sig_ou = 0.1
I_ou = my_OU(pars, sig=sig_ou, myseed=2020)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], I_ou, 'r')
plt.xlabel('t (ms)')
plt.ylabel(r'$I_{\mathrm{OU}}$')
plt.show()
###Output
_____no_output_____
###Markdown
Example: Up-Down transition

In the presence of two or more fixed points, noisy inputs can drive a transition between the fixed points! Here, we stimulate an E population for 1,000 ms applying OU inputs.
###Code
# @title Simulation of an E population with OU inputs
# @markdown Make sure you execute this cell to spot the Up-Down states!
pars = default_pars_single(T=1000)
pars['w'] = 5.0
sig_ou = 0.7
pars['tau_ou'] = 1. # [ms]
pars['I_ext'] = 0.56 + my_OU(pars, sig=sig_ou, myseed=2020)
r = simulate_single(pars)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], r, 'b', alpha=0.8)
plt.xlabel('t (ms)')
plt.ylabel(r'$r(t)$')
plt.show()
###Output
_____no_output_____
###Markdown
Tutorial 1: Neural Rate Models

**Week 2, Day 4: Dynamic Networks**

**By Neuromatch Academy**

__Content creators:__ Qinglong Gu, Songtin Li, Arvind Kumar, John Murray, Julijana Gjorgjieva

__Content reviewers:__ Maryam Vaziri-Pashkam, Ella Batty, Lorenzo Fontolan, Richard Gao, Spiros Chavlis, Michael Waskom, Siddharth Suresh

**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**

--- Tutorial Objectives

*Estimated timing of tutorial: 1 hour, 25 minutes*

The brain is a complex system, not because it is composed of a large number of diverse types of neurons, but mainly because of how neurons are connected to each other. The brain is indeed a network of highly specialized neuronal networks. The activity of a neural network constantly evolves in time. For this reason, neurons can be modeled as dynamical systems. The dynamical system approach is only one of the many modeling approaches that computational neuroscientists have developed (other points of view include information processing, statistical models, etc.). How the dynamics of neuronal networks affect the representation and processing of information in the brain is an open question. However, signatures of altered brain dynamics present in many brain diseases (e.g., in epilepsy or Parkinson's disease) tell us that it is crucial to study network activity dynamics if we want to understand the brain.

In this tutorial, we will simulate and study one of the simplest models of biological neuronal networks. Instead of modeling and simulating individual excitatory neurons (e.g., LIF models that you implemented yesterday), we will treat them as a single homogeneous population and approximate their dynamics using a single one-dimensional equation describing the evolution of their average spiking rate in time. We will learn how to build a firing rate model of a single population of excitatory neurons.

**Steps:**
- Write the equation for the firing rate dynamics of a 1D excitatory population.
- Visualize the response of the population as a function of parameters such as threshold level and gain, using the frequency-current (F-I) curve.
- Numerically simulate the dynamics of the excitatory population and find the fixed points of the system.
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/nvuty/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- Setup
###Code
# Imports
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt # root-finding algorithm
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Plotting Functions
def plot_fI(x, f):
plt.figure(figsize=(6, 4)) # plot the figure
plt.plot(x, f, 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
def plot_dr_r(r, drdt, x_fps=None):
plt.figure()
plt.plot(r, drdt, 'k')
plt.plot(r, 0. * r, 'k--')
if x_fps is not None:
plt.plot(x_fps, np.zeros_like(x_fps), "ko", ms=12)
plt.xlabel(r'$r$')
plt.ylabel(r'$\frac{dr}{dt}$', fontsize=20)
plt.ylim(-0.1, 0.1)
def plot_dFdt(x, dFdt):
plt.figure()
plt.plot(x, dFdt, 'r')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('dF(x)', fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
--- Section 1: Neuronal network dynamics
###Code
# @title Video 1: Dynamic networks
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1dh411o7qJ", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="p848349hPyw", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This video covers how to model a network with a single population of neurons and introduces neural rate-based models. It overviews feedforward networks and defines the F-I (firing rate vs. input) curve.

Section 1.1: Dynamics of a single excitatory population

Click here for text recap of relevant part of video

Individual neurons respond by spiking. When we average the spikes of neurons in a population, we can define the average firing activity of the population. In this model, we are interested in how the population-averaged firing varies as a function of time and network parameters. Mathematically, we can describe the firing rate dynamics of a feed-forward network as:\begin{align}\tau \frac{dr}{dt} &= -r + F(I_{\text{ext}}) \quad\qquad (1)\end{align}$r(t)$ represents the average firing rate of the excitatory population at time $t$, $\tau$ controls the timescale of the evolution of the average firing rate, $I_{\text{ext}}$ represents the external input, and the transfer function $F(\cdot)$ (which can be related to the f-I curve of individual neurons, described in the next sections) represents the population activation function in response to all received inputs.

To start building the model, please execute the cell below to initialize the simulation parameters.
###Code
# @markdown *Execute this cell to set default parameters for a single excitatory population model*
def default_pars_single(**kwargs):
pars = {}
# Excitatory parameters
pars['tau'] = 1. # Timescale of the E population [ms]
pars['a'] = 1.2 # Gain of the E population
pars['theta'] = 2.8 # Threshold of the E population
# Connection strength
pars['w'] = 0. # E to E, we first set it to 0
# External input
pars['I_ext'] = 0.
# simulation parameters
pars['T'] = 20. # Total duration of simulation [ms]
pars['dt'] = .1 # Simulation time step [ms]
pars['r_init'] = 0.2 # Initial value of E
# External parameters if any
pars.update(kwargs)
# Vector of discretized time points [ms]
pars['range_t'] = np.arange(0, pars['T'], pars['dt'])
return pars
pars = default_pars_single()
print(pars)
###Output
_____no_output_____
###Markdown
You can now use:
- `pars = default_pars_single()` to get all the parameters.
- `pars = default_pars_single(T=T_sim, dt=time_step)` to set a new simulation time and time step.
- `pars['New_para'] = value` to update an existing parameter dictionary.

Because `pars` is a dictionary, it can be passed to a function that requires individual parameters as arguments using `my_func(**pars)` syntax.

Section 1.2: F-I curves

*Estimated timing to here from start of tutorial: 17 min*

Click here for text recap of relevant part of video

In electrophysiology, a neuron is often characterized by its spike rate output in response to input currents. This is often called the **F-I** curve, denoting the output spike frequency (**F**) in response to different injected currents (**I**). We estimated this for an LIF neuron in yesterday's tutorial.

The transfer function $F(\cdot)$ in Equation $1$ represents the gain of the population as a function of the total input. The gain is often modeled as a sigmoidal function, i.e., more input drive leads to a nonlinear increase in the population firing rate. The output firing rate will eventually saturate for high input values. A sigmoidal $F(\cdot)$ is parameterized by its gain $a$ and threshold $\theta$.$$ F(x;a,\theta) = \frac{1}{1+\text{e}^{-a(x-\theta)}} - \frac{1}{1+\text{e}^{a\theta}} \quad(2)$$The argument $x$ represents the input to the population. Note that the second term is chosen so that $F(0;a,\theta)=0$. Many other transfer functions (generally monotonic) can also be used. Examples are the rectified linear function $ReLU(x)$ or the hyperbolic tangent $tanh(x)$.

Coding Exercise 1.2: Implement F-I curve

Let's first investigate the activation functions before simulating the dynamics of the entire population. In this exercise, you will implement a sigmoidal **F-I** curve or transfer function $F(x)$, with gain $a$ and threshold level $\theta$ as parameters:$$ F(x;a,\theta) = \frac{1}{1+\text{e}^{-a(x-\theta)}} - \frac{1}{1+\text{e}^{a\theta}}$$
###Code
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
#################################################
## TODO for students: compute f = F(x) ##
# Fill out function and remove
raise NotImplementedError("Student excercise: implement the f-I function")
#################################################
# Define the sigmoidal transfer function f = F(x)
f = ...
return f
# Set parameters
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Compute transfer function
f = F(x, pars['a'], pars['theta'])
# Visualize
plot_fI(x, f)
# to_remove solution
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
# Define the sigmoidal transfer function f = F(x)
f = (1 + np.exp(-a * (x - theta)))**-1 - (1 + np.exp(a * theta))**-1
return f
# Set parameters
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Compute transfer function
f = F(x, pars['a'], pars['theta'])
# Visualize
with plt.xkcd():
plot_fI(x, f)
###Output
_____no_output_____
###Markdown
Interactive Demo 1.2: Parameter exploration of F-I curve

Here's an interactive demo that shows how the F-I curve changes for different values of the gain and threshold parameters.
1. How does the gain parameter ($a$) affect the F-I curve?
2. How does the threshold parameter ($\theta$) affect the F-I curve?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def interactive_plot_FI(a, theta):
"""
Plot the population activation function.
Expects:
a : the gain of the function
theta : the threshold of the function
Returns:
plot of the F-I curve with the given parameters
"""
# set the range of input
x = np.arange(0, 10, .1)
plt.figure()
plt.plot(x, F(x, a, theta), 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
_ = widgets.interact(interactive_plot_FI, a=(0.3, 3, 0.3), theta=(2, 4, 0.2))
# to_remove explanation
"""
1) a determines the slope (gain) of the rising phase of the F-I curve
2) theta determines the input at which the function F(x) reaches its mid-value (0.5).
That is, theta shifts the F-I curve along the horizontal axis.
For the neurons we are using in this tutorial:
- a controls the gain of the neuron population
- theta controls the threshold at which the neuron population starts to respond
""";
###Output
_____no_output_____
###Markdown
Section 1.3: Simulation scheme of E dynamics

*Estimated timing to here from start of tutorial: 27 min*

Because $F(\cdot)$ is a nonlinear function, the exact solution of our differential equation of population activity cannot be determined via analytical methods. As we have seen before, we can use numerical methods, specifically the Euler method, to find the solution (that is, simulate the population activity).
###Code
# @markdown *Execute this cell to enable the single population rate model simulator: `simulate_single`*
def simulate_single(pars):
"""
Simulate an excitatory population of neurons
Args:
pars : Parameter dictionary
Returns:
rE : Activity of excitatory population (array)
Example:
pars = default_pars_single()
r = simulate_single(pars)
"""
# Set parameters
tau, a, theta = pars['tau'], pars['a'], pars['theta']
w = pars['w']
I_ext = pars['I_ext']
r_init = pars['r_init']
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
# Initialize activity
r = np.zeros(Lt)
r[0] = r_init
I_ext = I_ext * np.ones(Lt)
# Update the E activity
for k in range(Lt - 1):
dr = dt / tau * (-r[k] + F(w * r[k] + I_ext[k], a, theta))
r[k+1] = r[k] + dr
return r
help(simulate_single)
###Output
_____no_output_____
###Markdown
Interactive Demo 1.3: Parameter exploration of single population dynamics

Explore these dynamics of the population activity in this interactive demo.
1. How does $r_{\text{sim}}(t)$ change with different $I_{\text{ext}}$ values?
2. How does it change with different $\tau$ values?

Note that $r_{\rm ana}(t)$ denotes the analytical solution - you will learn how this is computed in the next section.
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
# get default parameters
pars = default_pars_single(T=20.)
def Myplot_E_diffI_difftau(I_ext, tau):
# set external input and time constant
pars['I_ext'] = I_ext
pars['tau'] = tau
# simulation
r = simulate_single(pars)
# Analytical Solution
r_ana = (pars['r_init']
+ (F(I_ext, pars['a'], pars['theta'])
- pars['r_init']) * (1. - np.exp(-pars['range_t'] / pars['tau'])))
# plot
plt.figure()
plt.plot(pars['range_t'], r, 'b', label=r'$r_{\mathrm{sim}}$(t)', alpha=0.5,
zorder=1)
plt.plot(pars['range_t'], r_ana, 'b--', lw=5, dashes=(2, 2),
label=r'$r_{\mathrm{ana}}$(t)', zorder=2)
plt.plot(pars['range_t'],
F(I_ext, pars['a'], pars['theta']) * np.ones(pars['range_t'].size),
'k--', label=r'$F(I_{\mathrm{ext}})$')
plt.xlabel('t (ms)', fontsize=16.)
plt.ylabel('Activity r(t)', fontsize=16.)
plt.legend(loc='best', fontsize=14.)
plt.show()
_ = widgets.interact(Myplot_E_diffI_difftau, I_ext=(0.0, 10., 1.),
tau=(1., 5., 0.2))
# to_remove explanation
"""
1) Weak inputs to the neurons eventually result in the activity converging to zero.
Strong inputs to the neurons eventually result in the activity converging to max value
2) The time constant tau does not affect the final response reached, but it determines
the time the neurons take to reach their fixed point.
""";
###Output
_____no_output_____
###Markdown
Think! 1.3: Finite activities

Above, we have numerically solved a system driven by a positive input. Yet, $r_E(t)$ either decays to zero or reaches a fixed non-zero value.
1. Why doesn't the solution of the system "explode" in a finite time? In other words, what guarantees that $r_E(t)$ stays finite?
2. Which parameter would you change in order to increase the maximum value of the response?
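A quick check you can run before reading the answer (a sketch, assuming the `F` implementation from the earlier cells) shows the saturation that keeps the activity bounded:

```python
# F saturates for large inputs, so dr/dt = (-r + F)/tau becomes negative
# once r exceeds max(F): the activity can never blow up
pars = default_pars_single()
print(F(np.array([10., 100., 1e6]), pars['a'], pars['theta']))
# all values stay below 1, approaching 1 - 1/(1 + e^{a*theta})
```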
###Code
# to_remove explanation
"""
1) As the F-I curve is bounded between zero and one, the system doesn't explode.
The F-I curve guarantees this property.
2) One way to increase the maximum response is to change the F-I curve. For
example, the ReLU is an unbounded function, and thus will increase the overall maximal
response of the network.
""";
###Output
_____no_output_____
###Markdown
--- Section 2: Fixed points of the single population system

*Estimated timing to here from start of tutorial: 45 min*

Section 2.1: Finding fixed points
###Code
# @title Video 2: Fixed point
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1v54y1v7Gr", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Ox3ELd1UFyo", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This video introduces recurrent networks and how to derive their fixed points. It also introduces vector fields and phase planes in one dimension.

Click here for text recap of video

We can now extend our feed-forward network to a recurrent network, governed by the equation:\begin{align}\tau \frac{dr}{dt} &= -r + F(w\cdot r + I_{\text{ext}}) \quad\qquad (3)\end{align}where, as before, $r(t)$ represents the average firing rate of the excitatory population at time $t$, $\tau$ controls the timescale of the evolution of the average firing rate, $I_{\text{ext}}$ represents the external input, and the transfer function $F(\cdot)$ (which can be related to the f-I curve of individual neurons, described in the next sections) represents the population activation function in response to all received inputs. Now we also have $w$, which denotes the strength (synaptic weight) of the recurrent input to the population.

As you varied the two parameters in the last Interactive Demo, you noticed that, while at first the system output quickly changes, with time, it reaches its maximum/minimum value and does not change anymore. The value eventually reached by the system is called the **steady state** of the system, or the **fixed point**. Essentially, in the steady states the derivative of the activity ($r$) with respect to time is zero, i.e., $\displaystyle \frac{dr}{dt}=0$. We can find the steady state of Equation (3) by setting $\displaystyle{\frac{dr}{dt}=0}$ and solving for $r$:$$-r_{\text{steady}} + F(w\cdot r_{\text{steady}} + I_{\text{ext}};a,\theta) = 0, \qquad (4)$$When it exists, the solution of Equation (4) defines a **fixed point** of the dynamical system in Equation (3). Note that if $F(x)$ is nonlinear, it is not always possible to find an analytical solution, but the solution can be found via numerical simulations, as we will do later.

From the Interactive Demo, one could also notice that the value of $\tau$ influences how quickly the activity will converge to the steady state from its initial value. In the specific case of $w=0$, we can also analytically compute the solution of Equation (1) (i.e., the thick blue dashed line) and deduce the role of $\tau$ in determining the convergence to the fixed point: $$\displaystyle{r(t) = \big{[}F(I_{\text{ext}};a,\theta) -r(t=0)\big{]} (1-\text{e}^{-\frac{t}{\tau}})} + r(t=0)$$We can now numerically calculate the fixed point with a root-finding algorithm.

Coding Exercise 2.1.1: Visualization of the fixed points

When it is not possible to find the solution of Equation (4) analytically, a graphical approach can be taken. To that end, it is useful to plot $\displaystyle{\frac{dr}{dt}}$ as a function of $r$. The values of $r$ for which the plotted function crosses zero on the y axis correspond to fixed points. Here, let us, for example, set $w=5.0$ and $I^{\text{ext}}=0.5$. From Equation (3), you can obtain$$\frac{dr}{dt} = [-r + F(w\cdot r + I^{\text{ext}})]\,/\,\tau $$Then, plot $dr/dt$ as a function of $r$, and check for the presence of fixed points.
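As a quick aside before the exercise: for $w=0$, Equation (4) reduces to $r_{\text{steady}} = F(I_{\text{ext}};a,\theta)$, which you can verify directly (a sketch, assuming `F` and `simulate_single` from the earlier cells):

```python
# With w = 0, the fixed-point equation gives r_steady = F(I_ext) directly
pars = default_pars_single(w=0., I_ext=3.)
r_steady = F(pars['I_ext'], pars['a'], pars['theta'])
print(f"predicted steady state: {r_steady:.3f}")
print(f"simulated final value : {simulate_single(pars)[-1]:.3f}")
```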
###Code
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
#########################################################################
# TODO compute drdt and disable the error
raise NotImplementedError("Finish the compute_drdt function")
#########################################################################
# Calculate drdt
drdt = ...
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Compute dr/dt
drdt = compute_drdt(r, **pars)
# Visualize
plot_dr_r(r, drdt)
# to_remove solution
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
# Calculate drdt
drdt = (-r + F(w * r + I_ext, a, theta)) / tau
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Compute dr/dt
drdt = compute_drdt(r, **pars)
# Visualize
with plt.xkcd():
plot_dr_r(r, drdt)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1.2: Numerical calculation of fixed points

We will now find the fixed points numerically. To do so, we need to specify initial values ($r_{\text{guess}}$) for the root-finding algorithm to start from. From the line $\displaystyle{\frac{dr}{dt}}$ plotted above in the last exercise, initial values can be chosen as a set of values close to where the line crosses zero on the y axis (real fixed point).

The next cell defines three helper functions that we will use:
- `my_fp_single(r_guess, **pars)` uses a root-finding algorithm to locate a fixed point near a given initial value
- `check_fp_single(x_fp, **pars)` verifies that the values of $r_{\rm fp}$ for which $\displaystyle{\frac{dr}{dt}} = 0$ are the true fixed points
- `my_fp_finder(r_guess_vector, **pars)` accepts an array of initial values and finds the same number of fixed points, using the above two functions
###Code
# @markdown *Execute this cell to enable the fixed point functions*
def my_fp_single(r_guess, a, theta, w, I_ext, **other_pars):
"""
Calculate the fixed point through drE/dt=0
Args:
r_guess : Initial value used for scipy.optimize function
a, theta, w, I_ext : simulation parameters
Returns:
x_fp : value of fixed point
"""
# define the right hand of E dynamics
def my_WCr(x):
r = x
drdt = (-r + F(w * r + I_ext, a, theta))
y = np.array(drdt)
return y
x0 = np.array(r_guess)
x_fp = opt.root(my_WCr, x0).x.item()
return x_fp
def check_fp_single(x_fp, a, theta, w, I_ext, mytol=1e-4, **other_pars):
"""
Verify |dr/dt| < mytol
Args:
x_fp : value of fixed point
a, theta, w, I_ext: simulation parameters
mytol : tolerance, default as 10^{-4}
Returns :
Whether it is a correct fixed point: True/False
"""
# calculate Equation (4)
y = x_fp - F(w * x_fp + I_ext, a, theta)
# Here we set tolerance as 10^{-4}
return np.abs(y) < mytol
def my_fp_finder(pars, r_guess_vector, mytol=1e-4):
"""
Calculate the fixed point(s) through drE/dt=0
Args:
pars : Parameter dictionary
r_guess_vector : Initial values used for scipy.optimize function
mytol : tolerance for checking fixed point, default as 10^{-4}
Returns:
x_fps : values of fixed points
"""
x_fps = []
for r_guess in r_guess_vector:
x_fp = my_fp_single(r_guess, **pars)
if check_fp_single(x_fp, **pars, mytol=mytol):
x_fps.append(x_fp)
return x_fps
help(my_fp_finder)
# Set parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Compute dr/dt
drdt = compute_drdt(r, **pars)
#############################################################################
# TODO for students:
# Define initial values close to the intersections of drdt and y=0
# (How many initial values? Hint: How many times do the two lines intersect?)
# Calculate the fixed point with these initial values and plot them
raise NotImplementedError('student_exercise: find fixed points numerically')
#############################################################################
# Initial guesses for fixed points
r_guess_vector = [...]
# Find fixed point numerically
x_fps = my_fp_finder(pars, r_guess_vector)
# Visualize
plot_dr_r(r, drdt, x_fps)
# to_remove solution
# Set parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Compute dr/dt
drdt = compute_drdt(r, **pars)
# Initial guesses for fixed points
r_guess_vector = [0, .4, .9]
# Find fixed point numerically
x_fps = my_fp_finder(pars, r_guess_vector)
# Visualize
with plt.xkcd():
plot_dr_r(r, drdt, x_fps)
###Output
_____no_output_____
###Markdown
Interactive Demo 2.1: Fixed points as a function of recurrent and external inputs

You can now explore how the previous plot changes when the recurrent coupling $w$ and the external input $I_{\text{ext}}$ take different values. How does the number of fixed points change?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def plot_intersection_single(w, I_ext):
# set your parameters
pars = default_pars_single(w=w, I_ext=I_ext)
# find fixed points
r_init_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_init_vector)
# plot
r = np.linspace(0, 1., 1000)
drdt = (-r + F(w * r + I_ext, pars['a'], pars['theta'])) / pars['tau']
plot_dr_r(r, drdt, x_fps)
_ = widgets.interact(plot_intersection_single, w=(1, 7, 0.2),
I_ext=(0, 3, 0.1))
# to_remove explanation
"""
The fixed points of the single excitatory neuron population are determined by both
recurrent connections w and external input I_ext. In a previous interactive demo
we saw how the system showed two different steady-states when w = 0. But when w
does not equal 0, for some range of w the system shows three fixed points and the
steady state depends on the initial conditions (i.e.,
r at time zero).
""";
###Output
_____no_output_____
###Markdown
Section 2.2: Relationship between trajectories & fixed points

Let's examine the relationship between the population activity over time and the fixed points. Here, let us first set $w=5.0$ and $I_{\text{ext}}=0.5$, and investigate the dynamics of $r(t)$ starting with different initial values $r(0) \equiv r_{\text{init}}$.
###Code
# @markdown Execute to visualize dr/dt
def plot_intersection_single(w, I_ext):
# set your parameters
pars = default_pars_single(w=w, I_ext=I_ext)
# find fixed points
r_init_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_init_vector)
# plot
r = np.linspace(0, 1., 1000)
drdt = (-r + F(w * r + I_ext, pars['a'], pars['theta'])) / pars['tau']
plot_dr_r(r, drdt, x_fps)
plot_intersection_single(w = 5.0, I_ext = 0.5)
###Output
_____no_output_____
###Markdown
Interactive Demo 2.2: Dynamics as a function of the initial value

Let's now set $r_{\rm init}$ to a value of your choice in this demo. How does the solution change? What do you observe? How does that relate to the previous plot of $\frac{dr}{dt}$?
###Code
# @markdown Make sure you execute this cell to enable the widget!
pars = default_pars_single(w=5.0, I_ext=0.5)
def plot_single_diffEinit(r_init):
pars['r_init'] = r_init
r = simulate_single(pars)
plt.figure()
plt.plot(pars['range_t'], r, 'b', zorder=1)
plt.plot(0, r[0], 'bo', alpha=0.7, zorder=2)
plt.xlabel('t (ms)', fontsize=16)
plt.ylabel(r'$r(t)$', fontsize=16)
plt.ylim(0, 1.0)
plt.show()
_ = widgets.interact(plot_single_diffEinit, r_init=(0, 1, 0.02))
# to_remove explanation
"""
There are three fixed points of the system for these values of
w and I_ext. Choose the initial value in this demo and see in which direction
the system output moves. When r_init is in the vicinity of the leftmost fixed point,
it moves towards the leftmost fixed point. When r_init is in the vicinity of the
rightmost fixed point, it moves towards the rightmost fixed point.
""";
###Output
_____no_output_____
###Markdown
We will plot the trajectories of $r(t)$ with $r_{\text{init}} = 0.0, 0.1, 0.2,..., 0.9$.
###Code
# @markdown Execute this cell to see the trajectories!
pars = default_pars_single()
pars['w'] = 5.0
pars['I_ext'] = 0.5
plt.figure(figsize=(8, 5))
for ie in range(10):
pars['r_init'] = 0.1 * ie # set the initial value
r = simulate_single(pars) # run the simulation
# plot the activity with given initial
plt.plot(pars['range_t'], r, 'b', alpha=0.1 + 0.1 * ie,
label=r'r$_{\mathrm{init}}$=%.1f' % (0.1 * ie))
plt.xlabel('t (ms)')
plt.title('Two steady states?')
plt.ylabel(r'$r$(t)')
plt.legend(loc=[1.01, -0.06], fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
We have three fixed points but only two steady states showing up - what's happening? It turns out that the stability of the fixed points matters. If a fixed point is stable, a trajectory starting near that fixed point will stay close to it and converge to it (the steady state will equal the fixed point). If a fixed point is unstable, any trajectory starting close to it will diverge and move towards a stable fixed point. In fact, the only way for a trajectory to settle at an unstable fixed point is if the initial value **exactly** equals the value of the fixed point. Think! 2.2: Stable vs unstable fixed pointsWhich of the fixed points for the model we've been examining in this section are stable vs unstable?
###Code
# @markdown Execute to print fixed point values
# Initial guesses for fixed points
r_guess_vector = [0, .4, .9]
# Find fixed point numerically
x_fps = my_fp_finder(pars, r_guess_vector)
print(f'Our fixed points are {x_fps}')
# to_remove explanation
"""
The trajectories converge to either the first or the third fixed point, so those
two are stable. The second fixed point is unstable, as no trajectories converge
to it.
"""
###Output
_____no_output_____
###Markdown
We can simulate the trajectory if we start at the unstable fixed point: you can see that it remains at that fixed point (the red line below).
###Code
# @markdown Execute to visualize trajectory starting at unstable fixed point
pars = default_pars_single()
pars['w'] = 5.0
pars['I_ext'] = 0.5
plt.figure(figsize=(8, 5))
for ie in range(10):
pars['r_init'] = 0.1 * ie # set the initial value
r = simulate_single(pars) # run the simulation
# plot the activity with given initial
plt.plot(pars['range_t'], r, 'b', alpha=0.1 + 0.1 * ie,
label=r'r$_{\mathrm{init}}$=%.1f' % (0.1 * ie))
pars['r_init'] = x_fps[1] # set the initial value
r = simulate_single(pars) # run the simulation
# plot the activity with given initial
plt.plot(pars['range_t'], r, 'r', alpha=1.0,
label=r'r$_{\mathrm{init}}$=%.4f' % (x_fps[1]))
plt.xlabel('t (ms)')
plt.title('Two steady states?')
plt.ylabel(r'$r$(t)')
plt.legend(loc=[1.01, -0.06], fontsize=14)
plt.show()
###Output
_____no_output_____
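###Markdown
How sensitive is the unstable fixed point? Here is a minimal added sketch (not part of the original tutorial): we nudge the initial condition slightly above and below the unstable fixed point and re-simulate, reusing `simulate_single`, `pars`, and `x_fps` from the cells above. The perturbation size `eps` is an arbitrary choice.
###Code
# Added sketch: nudge the initial condition slightly off the unstable fixed point.
# Assumes `pars` (w=5.0, I_ext=0.5) and `x_fps` from the cells above.
eps = 1e-3  # arbitrary small perturbation
plt.figure(figsize=(8, 5))
for delta in [-eps, 0., eps]:
  pars['r_init'] = x_fps[1] + delta
  r = simulate_single(pars)
  plt.plot(pars['range_t'], r, label=f'offset = {delta:+.0e}')
plt.xlabel('t (ms)')
plt.ylabel(r'$r(t)$')
plt.legend(fontsize=12)
plt.show()
###Output
_____no_output_____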
###Markdown
See Bonus Section 1 to learn how to determine the stability of fixed points in a quantitative way. Think! 2: Inhibitory populationsThroughout the tutorial, we have assumed $w > 0$, i.e., we considered a single population of **excitatory** neurons. What do you think the behavior of a population of inhibitory neurons will be, i.e., where $w > 0$ is replaced by $w < 0$?
###Code
# to_remove explanation
"""
You can check this by going back to Interactive Demo 2.1 and setting the
weight to w < 0. You will notice that the system has only one fixed point, and that
it lies close to zero. For these dynamics, the system will eventually converge
to zero. But try it out! A minimal numerical check follows below.
""";
###Output
_____no_output_____
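###Markdown
Here is that minimal numerical check (an added sketch, not part of the original tutorial): plot $\frac{dr}{dt}$ for a negative weight and locate its single fixed point with `my_fp_finder`. The value $w=-3$ is an arbitrary choice.
###Code
# Added sketch: with inhibitory coupling (w < 0) the system has a single fixed
# point close to zero. Assumes F, my_fp_finder and plot_dr_r are defined above.
pars_inh = default_pars_single(w=-3., I_ext=0.5)
r = np.linspace(0, 1., 1000)
drdt = (-r + F(pars_inh['w'] * r + pars_inh['I_ext'],
               pars_inh['a'], pars_inh['theta'])) / pars_inh['tau']
x_fps_inh = my_fp_finder(pars_inh, [0.5])
print(f'Fixed point(s) for w = -3: {x_fps_inh}')
plot_dr_r(r, drdt, x_fps_inh)
###Output
_____no_output_____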
###Markdown
--- Summary*Estimated timing of tutorial: 1 hour, 25 minutes*In this tutorial, we have investigated the dynamics of a rate-based single population of neurons.We learned about:- The effect of the input parameters and the time constant of the network on the dynamics of the population.- How to find the fixed point(s) of the system.We build on these concepts in the bonus material - check it out if you have time. You will learn:- How to determine the stability of a fixed point by linearizing the system.- How to add realistic inputs to our model. --- Bonus --- Bonus Section 1: Stability analysis via linearization of the dynamics
###Code
# @title Video 3: Stability of fixed points
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1oA411e7eg", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="KKMlWWU83Jg", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Here we will dive into the math of how to figure out the stability of a fixed point.Just like in our equation for the feedforward network, a generic linear system $$\frac{dx}{dt} = \lambda (x - b),$$ has a fixed point for $x=b$. The analytical solution of such a system can be found to be:$$x(t) = b + \big{(} x(0) - b \big{)} \text{e}^{\lambda t}.$$ Now consider a small perturbation of the activity around the fixed point: $x(0) = b+ \epsilon$, where $|\epsilon| \ll 1$. Will the perturbation $\epsilon(t)$ grow with time or will it decay to the fixed point? The evolution of the perturbation with time can be written, using the analytical solution for $x(t)$, as: $$\epsilon (t) = x(t) - b = \epsilon \text{e}^{\lambda t}$$- if $\lambda < 0$, $\epsilon(t)$ decays to zero, $x(t)$ will still converge to $b$ and the fixed point is "**stable**".- if $\lambda > 0$, $\epsilon(t)$ grows with time, $x(t)$ will leave the fixed point $b$ exponentially, and the fixed point is, therefore, "**unstable**" . Similar to what we did in the linear system above, in order to determine the stability of a fixed point $r^{*}$ of the excitatory population dynamics, we perturb Equation (1) around $r^{*}$ by $\epsilon$, i.e. $r = r^{*} + \epsilon$. We can plug in Equation (1) and obtain the equation determining the time evolution of the perturbation $\epsilon(t)$:\begin{align}\tau \frac{d\epsilon}{dt} \approx -\epsilon + w F'(w\cdot r^{*} + I_{\text{ext}};a,\theta) \epsilon \end{align}where $F'(\cdot)$ is the derivative of the transfer function $F(\cdot)$. We can rewrite the above equation as:\begin{align}\frac{d\epsilon}{dt} \approx \frac{\epsilon}{\tau }[-1 + w F'(w\cdot r^* + I_{\text{ext}};a,\theta)] \end{align}That is, as in the linear system above, the value of$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau \qquad (4)$$determines whether the perturbation will grow or decay to zero, i.e., $\lambda$ defines the stability of the fixed point. This value is called the **eigenvalue** of the dynamical system. The derivative of the sigmoid transfer function is:\begin{align} \frac{dF}{dx} & = \frac{d}{dx} (1+\exp\{-a(x-\theta)\})^{-1} \\& = a\exp\{-a(x-\theta)\} (1+\exp\{-a(x-\theta)\})^{-2}. \qquad (5)\end{align}We provide a helper function `dF` which computes this derivative.
###Code
# @markdown Execute this cell to enable helper function `dF` and visualize derivative
def dF(x, a, theta):
"""
Population activation function.
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
dFdx : the derivative of the population activation function at input x
"""
# Calculate the derivative of the population activation function
dFdx = a * np.exp(-a * (x - theta)) * (1 + np.exp(-a * (x - theta)))**-2
return dFdx
# Set parameters
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Compute derivative of transfer function
df = dF(x, pars['a'], pars['theta'])
# Visualize
plot_dFdt(x, df)
###Output
_____no_output_____
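###Markdown
As a quick sanity check (an added sketch, not in the original notebook), we can compare the analytical derivative `dF` against a centered finite-difference derivative of `F` computed with `np.gradient`; the two should agree up to discretization error.
###Code
# Added sketch: verify dF against a numerical derivative of F.
# Assumes F, dF and pars are defined in the cells above.
x = np.arange(0, 10, .1)
df_analytic = dF(x, pars['a'], pars['theta'])
df_numeric = np.gradient(F(x, pars['a'], pars['theta']), x)
print(f'max |analytic - numerical| = {np.max(np.abs(df_analytic - df_numeric)):.2e}')
###Output
_____no_output_____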
###Markdown
Bonus Coding Exercise 1: Compute eigenvaluesAs discussed above, for the case with $w=5.0$ and $I_{\text{ext}}=0.5$, the system displays **three** fixed points. However, when we simulated the dynamics and varied the initial conditions $r_{\rm init}$, we could only obtain **two** steady states. In this exercise, we will now check the stability of each of the three fixed points by calculating the corresponding eigenvalues with the function `eig_single`. Check the sign of each eigenvalue (i.e., stability of each fixed point). How many of the fixed points are stable?Note that the expression for the eigenvalue at fixed point $r^*$ is$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau$$
###Code
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigenvalue of the linearized system
"""
#####################################################################
## TODO for students: compute eigenvalue and disable the error
  raise NotImplementedError("Student exercise: compute the eigenvalue")
######################################################################
# Compute the eigenvalue
eig = ...
return eig
# Find the eigenvalues for all fixed points
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
for i, fp in enumerate(x_fp):
  eig_fp = eig_single(fp, **pars)
  print(f'Fixed point{i + 1} at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
# to_remove solution
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigenvalue of the linearized system
"""
# Compute the eigenvalue
eig = (-1. + w * dF(w * fp + I_ext, a, theta)) / tau
return eig
# Find the eigenvalues for all fixed points
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
for i, fp in enumerate(x_fp):
  eig_fp = eig_single(fp, **pars)
  print(f'Fixed point{i + 1} at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
###Output
_____no_output_____
###Markdown
**SAMPLE OUTPUT**
```
Fixed point1 at 0.042 with Eigenvalue=-0.583
Fixed point2 at 0.447 with Eigenvalue=0.498
Fixed point3 at 0.900 with Eigenvalue=-0.626
```
We can see that the first and third fixed points are stable (negative eigenvalues) and the second is unstable (positive eigenvalue) - as we expected! --- Bonus Section 2: Noisy input drives the transition between two stable states As discussed in several previous tutorials, the Ornstein-Uhlenbeck (OU) process is usually used to generate a noisy input into the neuron. The OU input $\eta(t)$ follows: $$\tau_\eta \frac{d}{dt}\eta(t) = -\eta (t) + \sigma_\eta\sqrt{2\tau_\eta}\xi(t)$$Execute the following function `my_OU(pars, sig, myseed=False)` to generate an OU process.
###Code
# @markdown Execute to get helper function `my_OU` and visualize OU process
def my_OU(pars, sig, myseed=False):
"""
A function that generates an Ornstein-Uhlenbeck process
Args:
pars : parameter dictionary
sig : noise amplitude
myseed : random seed. int or boolean
Returns:
I : Ornstein-Uhlenbeck input current
"""
# Retrieve simulation parameters
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
tau_ou = pars['tau_ou'] # [ms]
# set random seed
if myseed:
np.random.seed(seed=myseed)
else:
np.random.seed()
# Initialize
noise = np.random.randn(Lt)
I_ou = np.zeros(Lt)
I_ou[0] = noise[0] * sig
# generate OU
for it in range(Lt - 1):
I_ou[it + 1] = (I_ou[it]
+ dt / tau_ou * (0. - I_ou[it])
+ np.sqrt(2 * dt / tau_ou) * sig * noise[it + 1])
return I_ou
pars = default_pars_single(T=100)
pars['tau_ou'] = 1. # [ms]
sig_ou = 0.1
I_ou = my_OU(pars, sig=sig_ou, myseed=2020)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], I_ou, 'r')
plt.xlabel('t (ms)')
plt.ylabel(r'$I_{\mathrm{OU}}$')
plt.show()
###Output
_____no_output_____
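###Markdown
As an added sanity check (not part of the original tutorial), the update rule above yields a stationary standard deviation of approximately `sig` (up to a correction of order dt/tau_ou). We can confirm this on a long sample; the duration and burn-in below are arbitrary choices.
###Code
# Added sketch: the OU process above has a stationary std of roughly sig.
pars_chk = default_pars_single(T=10000)  # long sample (arbitrary duration)
pars_chk['tau_ou'] = 1.  # [ms]
I_chk = my_OU(pars_chk, sig=0.1, myseed=2020)
burn = int(10. / pars_chk['dt'])  # discard a 10 ms transient (arbitrary)
print(f'empirical std = {I_chk[burn:].std():.3f} (sig = 0.1)')
###Output
_____no_output_____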
###Markdown
In the presence of two or more fixed points, noisy inputs can drive a transition between the fixed points! Here, we stimulate an E population for 1,000 ms applying OU inputs.
###Code
# @markdown Execute this cell to simulate E population with OU inputs
pars = default_pars_single(T=1000)
pars['w'] = 5.0
sig_ou = 0.7
pars['tau_ou'] = 1. # [ms]
pars['I_ext'] = 0.56 + my_OU(pars, sig=sig_ou, myseed=2020)
r = simulate_single(pars)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], r, 'b', alpha=0.8)
plt.xlabel('t (ms)')
plt.ylabel(r'$r(t)$')
plt.show()
###Output
_____no_output_____
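###Markdown
To put a number on the switching above, here is an added sketch (not part of the original tutorial): threshold $r(t)$ at the unstable fixed point found earlier for $w=5.0$, $I_{\text{ext}}=0.5$ (approximately 0.447; the mean input here is slightly different, so this threshold is only approximate) and measure the fraction of time spent in each state.
###Code
# Added sketch: fraction of time in the Up vs Down state, thresholding r(t)
# at the (approximate) unstable fixed point. Assumes `r` from the cell above.
r_threshold = 0.447  # approximate unstable fixed point for w=5.0, I_ext=0.5
up_fraction = np.mean(r > r_threshold)
print(f'Fraction of time in the Up state : {up_fraction:.2f}')
print(f'Fraction of time in the Down state: {1 - up_fraction:.2f}')
###Output
_____no_output_____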
###Markdown
Tutorial 1: Neural Rate Models**Week 2, Day 4: Dynamic Networks****By Neuromatch Academy**__Content creators:__ Qinglong Gu, Songtin Li, Arvind Kumar, John Murray, Julijana Gjorgjieva __Content reviewers:__ Maryam Vaziri-Pashkam, Ella Batty, Lorenzo Fontolan, Richard Gao, Spiros Chavlis, Michael Waskom --- Tutorial ObjectivesThe brain is a complex system, not because it is composed of a large number of diverse types of neurons, but mainly because of how neurons are connected to each other. The brain is indeed a network of highly specialized neuronal networks. The activity of a neural network constantly evolves in time. For this reason, neurons can be modeled as dynamical systems. The dynamical system approach is only one of the many modeling approaches that computational neuroscientists have developed (other points of view include information processing, statistical models, etc.). How the dynamics of neuronal networks affect the representation and processing of information in the brain is an open question. However, signatures of altered brain dynamics present in many brain diseases (e.g., in epilepsy or Parkinson's disease) tell us that it is crucial to study network activity dynamics if we want to understand the brain.In this tutorial, we will simulate and study one of the simplest models of biological neuronal networks. Instead of modeling and simulating individual excitatory neurons (e.g., LIF models that you implemented yesterday), we will treat them as a single homogeneous population and approximate their dynamics using a single one-dimensional equation describing the evolution of their average spiking rate in time.In this tutorial, we will learn how to build a firing rate model of a single population of excitatory neurons. **Steps:**- Write the equation for the firing rate dynamics of a 1D excitatory population.- Visualize the response of the population as a function of parameters such as threshold level and gain, using the frequency-current (F-I) curve.- Numerically simulate the dynamics of the excitatory population and find the fixed points of the system. - Investigate the stability of the fixed points by linearizing the dynamics around them. --- Setup
###Code
# Imports
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt # root-finding algorithm
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Helper functions
def plot_fI(x, f):
plt.figure(figsize=(6, 4)) # plot the figure
plt.plot(x, f, 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
def plot_dr_r(r, drdt, x_fps=None):
plt.figure()
plt.plot(r, drdt, 'k')
plt.plot(r, 0. * r, 'k--')
if x_fps is not None:
plt.plot(x_fps, np.zeros_like(x_fps), "ko", ms=12)
plt.xlabel(r'$r$')
plt.ylabel(r'$\frac{dr}{dt}$', fontsize=20)
plt.ylim(-0.1, 0.1)
def plot_dFdt(x, dFdt):
plt.figure()
plt.plot(x, dFdt, 'r')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('dF(x)', fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
--- Section 1: Neuronal network dynamics
###Code
# @title Video 1: Dynamic networks
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="p848349hPyw", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
###Output
_____no_output_____
###Markdown
Section 1.1: Dynamics of a single excitatory populationIndividual neurons respond by spiking. When we average the spikes of neurons in a population, we can define the average firing activity of the population. In this model, we are interested in how the population-averaged firing varies as a function of time and network parameters. Mathematically, we can describe the firing rate dynamic as:\begin{align}\tau \frac{dr}{dt} &= -r + F(w\cdot r + I_{\text{ext}}) \quad\qquad (1)\end{align}$r(t)$ represents the average firing rate of the excitatory population at time $t$, $\tau$ controls the timescale of the evolution of the average firing rate, $w$ denotes the strength (synaptic weight) of the recurrent input to the population, $I_{\text{ext}}$ represents the external input, and the transfer function $F(\cdot)$ (which can be related to f-I curve of individual neurons described in the next sections) represents the population activation function in response to all received inputs.To start building the model, please execute the cell below to initialize the simulation parameters.
###Code
# @markdown *Execute this cell to set default parameters for a single excitatory population model*
def default_pars_single(**kwargs):
pars = {}
# Excitatory parameters
pars['tau'] = 1. # Timescale of the E population [ms]
pars['a'] = 1.2 # Gain of the E population
pars['theta'] = 2.8 # Threshold of the E population
# Connection strength
pars['w'] = 0. # E to E, we first set it to 0
# External input
pars['I_ext'] = 0.
# simulation parameters
pars['T'] = 20. # Total duration of simulation [ms]
pars['dt'] = .1 # Simulation time step [ms]
pars['r_init'] = 0.2 # Initial value of E
# External parameters if any
pars.update(kwargs)
# Vector of discretized time points [ms]
pars['range_t'] = np.arange(0, pars['T'], pars['dt'])
return pars
###Output
_____no_output_____
###Markdown
You can now use:- `pars = default_pars_single()` to get all the parameters, and then you can execute `print(pars)` to check these parameters. - `pars = default_pars_single(T=T_sim, dt=time_step)` to set new simulation time and time step- To update an existing parameter dictionary, use `pars['New_para'] = value`Because `pars` is a dictionary, it can be passed to a function that requires individual parameters as arguments using `my_func(**pars)` syntax. Section 1.2: F-I curvesIn electrophysiology, a neuron is often characterized by its spike rate output in response to input currents. This is often called the **F-I** curve, denoting the output spike frequency (**F**) in response to different injected currents (**I**). We estimated this for an LIF neuron in yesterday's tutorial.The transfer function $F(\cdot)$ in Equation $1$ represents the gain of the population as a function of the total input. The gain is often modeled as a sigmoidal function, i.e., more input drive leads to a nonlinear increase in the population firing rate. The output firing rate will eventually saturate for high input values. A sigmoidal $F(\cdot)$ is parameterized by its gain $a$ and threshold $\theta$.$$ F(x;a,\theta) = \frac{1}{1+\text{e}^{-a(x-\theta)}} - \frac{1}{1+\text{e}^{a\theta}} \quad(2)$$The argument $x$ represents the input to the population. Note that the second term is chosen so that $F(0;a,\theta)=0$.Many other transfer functions (generally monotonic) can be also used. Examples are the rectified linear function $ReLU(x)$ or the hyperbolic tangent $tanh(x)$. Exercise 1: Implement F-I curve Let's first investigate the activation functions before simulating the dynamics of the entire population. In this exercise, you will implement a sigmoidal **F-I** curve or transfer function $F(x)$, with gain $a$ and threshold level $\theta$ as parameters.
###Code
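# Aside (added illustration, not part of the original exercise): because `pars`
# is a plain dictionary, `my_func(**pars)` unpacks it into keyword arguments,
# and a `**other_pars` catch-all absorbs any unused entries -- a pattern used
# by several functions below. `demo_func` is a hypothetical example.
def demo_func(tau, a, theta, **other_pars):
  return f'tau={tau}, a={a}, theta={theta}'
print(demo_func(**default_pars_single()))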
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
#################################################
## TODO for students: compute f = F(x) ##
# Fill out function and remove
  raise NotImplementedError("Student exercise: implement the f-I function")
#################################################
# Define the sigmoidal transfer function f = F(x)
f = ...
return f
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
# f = F(x, pars['a'], pars['theta'])
# plot_fI(x, f)
# to_remove solution
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
# Define the sigmoidal transfer function f = F(x)
f = (1 + np.exp(-a * (x - theta)))**-1 - (1 + np.exp(a * theta))**-1
return f
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
f = F(x, pars['a'], pars['theta'])
with plt.xkcd():
plot_fI(x, f)
###Output
_____no_output_____
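###Markdown
The text above notes that the second term in Equation 2 is chosen so that $F(0;a,\theta)=0$. Here is a one-line numerical check of that claim (an added sketch, not in the original notebook).
###Code
# Added sketch: F(0; a, theta) should be exactly zero by construction.
print(f"F(0) = {F(0., pars['a'], pars['theta']):.2e}")
###Output
_____no_output_____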
###Markdown
Interactive Demo: Parameter exploration of F-I curveHere's an interactive demo that shows how the F-I curve changes for different values of the gain and threshold parameters. How do the gain and threshold parameters affect the F-I curve?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def interactive_plot_FI(a, theta):
"""
Population activation function.
Args:
a : the gain of the function
theta : the threshold of the function
Returns:
plots the F-I curve with the given parameters
"""
# set the range of input
x = np.arange(0, 10, .1)
plt.figure()
plt.plot(x, F(x, a, theta), 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
_ = widgets.interact(interactive_plot_FI, a=(0.3, 3, 0.3), theta=(2, 4, 0.2))
# to_remove explanation
"""
Discussion:
For the function we have chosen to model the F-I curve (eq 2),
- a determines the slope (gain) of the rising phase of the F-I curve
- theta determines the input at which the function F(x) reaches its mid-value (0.5).
That is, theta shifts the F-I curve along the horizontal axis.
For the neurons we are using in this tutorial:
- a controls the gain of the neuron population
- theta controls the threshold at which the neuron population starts to respond
""";
###Output
_____no_output_____
###Markdown
Section 1.3: Simulation scheme of E dynamicsBecause $F(\cdot)$ is a nonlinear function, the exact solution of Equation $1$ can not be determined via analytical methods. Therefore, numerical methods must be used to find the solution. In practice, the derivative on the left-hand side of Equation $1$ can be approximated using the Euler method on a time-grid of stepsize $\Delta t$:\begin{align}&\frac{dr}{dt} \approx \frac{r[k+1]-r[k]}{\Delta t} \end{align}where $r[k] = r(k\Delta t)$. Thus,$$\Delta r[k] = \frac{\Delta t}{\tau}[-r[k] + F(w\cdot r[k] + I_{\text{ext}}[k];a,\theta)]$$Hence, Equation (1) is updated at each time step by:$$r[k+1] = r[k] + \Delta r[k]$$
###Code
# @markdown *Execute this cell to enable the single population rate model simulator: `simulate_single`*
def simulate_single(pars):
"""
Simulate an excitatory population of neurons
Args:
pars : Parameter dictionary
Returns:
rE : Activity of excitatory population (array)
Example:
pars = default_pars_single()
r = simulate_single(pars)
"""
# Set parameters
tau, a, theta = pars['tau'], pars['a'], pars['theta']
w = pars['w']
I_ext = pars['I_ext']
r_init = pars['r_init']
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
# Initialize activity
r = np.zeros(Lt)
r[0] = r_init
I_ext = I_ext * np.ones(Lt)
# Update the E activity
for k in range(Lt - 1):
dr = dt / tau * (-r[k] + F(w * r[k] + I_ext[k], a, theta))
r[k+1] = r[k] + dr
return r
help(simulate_single)
###Output
_____no_output_____
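###Markdown
The Euler scheme above carries a discretization error that shrinks with the step size. As an added sketch (not part of the original tutorial), we compare a coarse and a fine step for the same parameters at a point mid-transient; the specific values of `dt`, `I_ext`, and the comparison time are arbitrary choices.
###Code
# Added sketch: the Euler discretization error shrinks with the step size dt.
pars_coarse = default_pars_single(I_ext=1.0, dt=0.1)
pars_fine = default_pars_single(I_ext=1.0, dt=0.001)
r_coarse = simulate_single(pars_coarse)
r_fine = simulate_single(pars_fine)
t_cmp = 1.0  # compare mid-transient, at t = 1 ms (arbitrary choice)
print(f"r(1 ms) with dt=0.1  : {r_coarse[int(round(t_cmp / pars_coarse['dt']))]:.6f}")
print(f"r(1 ms) with dt=0.001: {r_fine[int(round(t_cmp / pars_fine['dt']))]:.6f}")
###Output
_____no_output_____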
###Markdown
Interactive Demo: Parameter Exploration of single population dynamicsNote that $w=0$, as in the default setting, means no recurrent input to the neuron population in Equation (1). Hence, the dynamics are entirely determined by the external input $I_{\text{ext}}$. Explore these dynamics in this interactive demo.How does $r_{\text{sim}}(t)$ change with different $I_{\text{ext}}$ values? How does it change with different $\tau$ values? Investigate the relationship between $F(I_{\text{ext}}; a, \theta)$ and the steady value of $r(t)$. Note that, $r_{\rm ana}(t)$ denotes the analytical solution - you will learn how this is computed in the next section.
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
# get default parameters
pars = default_pars_single(T=20.)
def Myplot_E_diffI_difftau(I_ext, tau):
# set external input and time constant
pars['I_ext'] = I_ext
pars['tau'] = tau
# simulation
r = simulate_single(pars)
# Analytical Solution
r_ana = (pars['r_init']
+ (F(I_ext, pars['a'], pars['theta'])
- pars['r_init']) * (1. - np.exp(-pars['range_t'] / pars['tau'])))
# plot
plt.figure()
plt.plot(pars['range_t'], r, 'b', label=r'$r_{\mathrm{sim}}$(t)', alpha=0.5,
zorder=1)
plt.plot(pars['range_t'], r_ana, 'b--', lw=5, dashes=(2, 2),
label=r'$r_{\mathrm{ana}}$(t)', zorder=2)
plt.plot(pars['range_t'],
F(I_ext, pars['a'], pars['theta']) * np.ones(pars['range_t'].size),
'k--', label=r'$F(I_{\mathrm{ext}})$')
plt.xlabel('t (ms)', fontsize=16.)
plt.ylabel('Activity r(t)', fontsize=16.)
plt.legend(loc='best', fontsize=14.)
plt.show()
_ = widgets.interact(Myplot_E_diffI_difftau, I_ext=(0.0, 10., 1.),
tau=(1., 5., 0.2))
# to_remove explanation
"""
Discussion:
Given the choice of F-I curve (eq 2) and dynamics of the neuron population (eq. 1)
the neurons have two fixed points or steady-state responses irrespective of the input.
- Weak inputs to the neurons eventually result in the activity converging to zero
- Strong inputs to the neurons eventually result in the activity converging to max value
The time constant tau, does not affect the steady-state response but it determines
the time the neurons take to reach to their fixed point.
""";
###Output
_____no_output_____
###Markdown
Think!Above, we have numerically solved a system driven by a positive input. Yet, $r_E(t)$ either decays to zero or reaches a fixed non-zero value.- Why doesn't the solution of the system "explode" in a finite time? In other words, what guarantees that $r_E$(t) stays finite? - Which parameter would you change in order to increase the maximum value of the response?
###Code
# to_remove explanation
"""
Discussion:
1) As the F-I curve is bounded between zero and one, the system doesn't explode.
The F-I curve guarantees this property.
2) One way to increase the maximum response is to change the f-I curve. For
example, the ReLU is an unbounded function, and thus will increase the overall maximal
response of the network.
""";
###Output
_____no_output_____
###Markdown
--- Section 2: Fixed points of the single population system
###Code
# @title Video 2: Fixed point
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Ox3ELd1UFyo", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
###Output
_____no_output_____
###Markdown
As you varied the two parameters in the last Interactive Demo, you noticed that, while at first the system output quickly changes, with time, it reaches its maximum/minimum value and does not change anymore. The value eventually reached by the system is called the **steady state** of the system, or the **fixed point**. Essentially, at a steady state the derivative of the activity ($r$) with respect to time is zero, i.e. $\displaystyle \frac{dr}{dt}=0$. We can find the steady state of Equation (1) by setting $\displaystyle{\frac{dr}{dt}=0}$ and solving for $r$:$$-r_{\text{steady}} + F(w\cdot r_{\text{steady}} + I_{\text{ext}};a,\theta) = 0, \qquad (3)$$When it exists, the solution of Equation (3) defines a **fixed point** of the dynamical system in Equation (1). Note that if $F(x)$ is nonlinear, it is not always possible to find an analytical solution, but the solution can be found via numerical simulations, as we will do later.From the Interactive Demo, one could also notice that the value of $\tau$ influences how quickly the activity will converge to the steady state from its initial value. In the specific case of $w=0$, we can also analytically compute the solution of Equation (1) (i.e., the thick blue dashed line) and deduce the role of $\tau$ in determining the convergence to the fixed point: $$\displaystyle{r(t) = \big{[}F(I_{\text{ext}};a,\theta) -r(t=0)\big{]} (1-\text{e}^{-\frac{t}{\tau}})} + r(t=0)$$We can now numerically calculate the fixed point with a root-finding algorithm. Exercise 2: Visualization of the fixed pointsWhen it is not possible to find the solution for Equation (3) analytically, a graphical approach can be taken. To that end, it is useful to plot $\displaystyle{\frac{dr}{dt}}$ as a function of $r$. The values of $r$ for which the plotted function crosses zero on the y axis correspond to fixed points. Here, let us, for example, set $w=5.0$ and $I_{\text{ext}}=0.5$. From Equation (1), you can obtain$$\frac{dr}{dt} = [-r + F(w\cdot r + I_{\text{ext}})]\,/\,\tau$$Then, plot $dr/dt$ as a function of $r$, and check for the presence of fixed points.
###Code
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
#########################################################################
# TODO compute drdt and disable the error
raise NotImplementedError("Finish the compute_drdt function")
#########################################################################
# Calculate drdt
drdt = ...
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Uncomment to test your function
# drdt = compute_drdt(r, **pars)
# plot_dr_r(r, drdt)
# to_remove solution
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
# Calculate drdt
drdt = (-r + F(w * r + I_ext, a, theta)) / tau
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
with plt.xkcd():
plot_dr_r(r, drdt)
###Output
_____no_output_____
###Markdown
Exercise 3: Fixed point calculationWe will now find the fixed points numerically. To do so, we need to specify initial values ($r_{\text{guess}}$) for the root-finding algorithm to start from. From the line $\displaystyle{\frac{dr}{dt}}$ plotted above in Exercise 2, initial values can be chosen as a set of values close to where the line crosses zero on the y axis (the true fixed points).The next cell defines three helper functions that we will use:- `my_fp_single(r_guess, **pars)` uses a root-finding algorithm to locate a fixed point near a given initial value- `check_fp_single(x_fp, **pars)` verifies that the values of $r_{\rm fp}$ for which $\displaystyle{\frac{dr}{dt}} = 0$ are the true fixed points- `my_fp_finder(r_guess_vector, **pars)` accepts an array of initial values and finds the same number of fixed points, using the above two functions
###Code
# @markdown *Execute this cell to enable the fixed point functions*
def my_fp_single(r_guess, a, theta, w, I_ext, **other_pars):
"""
Calculate the fixed point through drE/dt=0
Args:
r_guess : Initial value used for scipy.optimize function
a, theta, w, I_ext : simulation parameters
Returns:
x_fp : value of fixed point
"""
# define the right hand of E dynamics
def my_WCr(x):
r = x
drdt = (-r + F(w * r + I_ext, a, theta))
y = np.array(drdt)
return y
x0 = np.array(r_guess)
x_fp = opt.root(my_WCr, x0).x.item()
return x_fp
def check_fp_single(x_fp, a, theta, w, I_ext, mytol=1e-4, **other_pars):
"""
Verify |dr/dt| < mytol
Args:
x_fp : value of fixed point
a, theta, w, I_ext: simulation parameters
mytol : tolerance, default as 10^{-4}
Returns :
Whether it is a correct fixed point: True/False
"""
# calculate Equation(3)
y = x_fp - F(w * x_fp + I_ext, a, theta)
# Here we set tolerance as 10^{-4}
return np.abs(y) < mytol
def my_fp_finder(pars, r_guess_vector, mytol=1e-4):
"""
Calculate the fixed point(s) through drE/dt=0
Args:
pars : Parameter dictionary
r_guess_vector : Initial values used for scipy.optimize function
mytol : tolerance for checking fixed point, default as 10^{-4}
Returns:
x_fps : values of fixed points
"""
x_fps = []
for r_guess in r_guess_vector:
x_fp = my_fp_single(r_guess, **pars)
if check_fp_single(x_fp, **pars, mytol=mytol):
x_fps.append(x_fp)
return x_fps
help(my_fp_finder)
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
#############################################################################
# TODO for students:
# Define initial values close to the intersections of drdt and y=0
# (How many initial values? Hint: How many times do the two lines intersect?)
# Calculate the fixed point with these initial values and plot them
#############################################################################
r_guess_vector = [...]
# Uncomment to test your values
# x_fps = my_fp_finder(pars, r_guess_vector)
# plot_dr_r(r, drdt, x_fps)
# to_remove solution
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
r_guess_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_guess_vector)
with plt.xkcd():
plot_dr_r(r, drdt, x_fps)
###Output
_____no_output_____
###Markdown
Interactive Demo: fixed points as a function of recurrent and external inputs.You can now explore how the previous plot changes when the recurrent coupling $w$ and the external input $I_{\text{ext}}$ take different values. How does the number of fixed points change?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def plot_intersection_single(w, I_ext):
# set your parameters
pars = default_pars_single(w=w, I_ext=I_ext)
# find fixed points
r_init_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_init_vector)
# plot
r = np.linspace(0, 1., 1000)
drdt = (-r + F(w * r + I_ext, pars['a'], pars['theta'])) / pars['tau']
plot_dr_r(r, drdt, x_fps)
_ = widgets.interact(plot_intersection_single, w=(1, 7, 0.2),
I_ext=(0, 3, 0.1))
# to_remove explanation
"""
Discussion:
The fixed points of the single excitatory neuron population are determined by both
recurrent connections w and external input I_ext. In a previous interactive demo
we saw how the system showed two different steady states when w = 0. But when w
does not equal 0, for some range of w the system shows three fixed points (the middle
one being unstable), and the steady state depends on the initial condition (i.e.,
r at time zero).
More on this will be explained in the next section.
""";
###Output
_____no_output_____
###Markdown
--- SummaryIn this tutorial, we have investigated the dynamics of a rate-based single population of neurons.We learned about:- The effect of the input parameters and the time constant of the network on the dynamics of the population.- How to find the fixed point(s) of the system.Next, we have two bonus (but important) concepts in dynamical systems analysis and simulation. If you have time left, watch the next video and work through the exercises. You will learn:- How to determine the stability of a fixed point by linearizing the system.- How to add realistic inputs to our model. --- Bonus 1: Stability of a fixed point
###Code
# @title Video 3: Stability of fixed points
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="KKMlWWU83Jg", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
###Output
_____no_output_____
###Markdown
Initial values and trajectoriesHere, let us first set $w=5.0$ and $I_{\text{ext}}=0.5$, and investigate the dynamics of $r(t)$ starting with different initial values $r(0) \equiv r_{\text{init}}$. We will plot the trajectories of $r(t)$ with $r_{\text{init}} = 0.0, 0.1, 0.2,..., 0.9$.
###Code
# @markdown Execute this cell to see the trajectories!
pars = default_pars_single()
pars['w'] = 5.0
pars['I_ext'] = 0.5
plt.figure(figsize=(8, 5))
for ie in range(10):
pars['r_init'] = 0.1 * ie # set the initial value
r = simulate_single(pars) # run the simulation
# plot the activity with given initial
plt.plot(pars['range_t'], r, 'b', alpha=0.1 + 0.1 * ie,
label=r'r$_{\mathrm{init}}$=%.1f' % (0.1 * ie))
plt.xlabel('t (ms)')
plt.title('Two steady states?')
plt.ylabel(r'$r$(t)')
plt.legend(loc=[1.01, -0.06], fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
Interactive Demo: dynamics as a function of the initial valueLet's now set $r_{\rm init}$ to a value of your choice in this demo. How does the solution change? What do you observe?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
pars = default_pars_single(w=5.0, I_ext=0.5)
def plot_single_diffEinit(r_init):
pars['r_init'] = r_init
r = simulate_single(pars)
plt.figure()
plt.plot(pars['range_t'], r, 'b', zorder=1)
plt.plot(0, r[0], 'bo', alpha=0.7, zorder=2)
plt.xlabel('t (ms)', fontsize=16)
plt.ylabel(r'$r(t)$', fontsize=16)
plt.ylim(0, 1.0)
plt.show()
_ = widgets.interact(plot_single_diffEinit, r_init=(0, 1, 0.02))
# to_remove explanation
"""
Discussion:
To better appreciate what is happening here, you should go back to the previous
interactive demo. Set w = 5 and I_ext = 0.5.
You will find that there are three fixed points of the system for these values of
w and I_ext. Now, choose the initial value in this demo and see in which direction
the system output moves. When r_init is in the vicinity of the leftmost fixed point,
it moves towards the leftmost fixed point. When r_init is in the vicinity of the
rightmost fixed point, it moves towards the rightmost fixed point.
""";
###Output
_____no_output_____
###Markdown
Stability analysis via linearization of the dynamicsJust like Equation $1$ in the case ($w=0$) discussed above, a generic linear system $$\frac{dx}{dt} = \lambda (x - b),$$ has a fixed point for $x=b$. The analytical solution of such a system can be found to be:$$x(t) = b + \big{(} x(0) - b \big{)} \text{e}^{\lambda t}.$$ Now consider a small perturbation of the activity around the fixed point: $x(0) = b+ \epsilon$, where $|\epsilon| \ll 1$. Will the perturbation $\epsilon(t)$ grow with time or will it decay to the fixed point? The evolution of the perturbation with time can be written, using the analytical solution for $x(t)$, as: $$\epsilon (t) = x(t) - b = \epsilon \text{e}^{\lambda t}$$- if $\lambda < 0$, $\epsilon(t)$ decays to zero, $x(t)$ will still converge to $b$ and the fixed point is "**stable**".- if $\lambda > 0$, $\epsilon(t)$ grows with time, $x(t)$ will leave the fixed point $b$ exponentially, and the fixed point is, therefore, "**unstable**" . Compute the stability of Equation $1$Similar to what we did in the linear system above, in order to determine the stability of a fixed point $r^{*}$ of the excitatory population dynamics, we perturb Equation (1) around $r^{*}$ by $\epsilon$, i.e. $r = r^{*} + \epsilon$. We can plug in Equation (1) and obtain the equation determining the time evolution of the perturbation $\epsilon(t)$:\begin{align}\tau \frac{d\epsilon}{dt} \approx -\epsilon + w F'(w\cdot r^{*} + I_{\text{ext}};a,\theta) \epsilon \end{align}where $F'(\cdot)$ is the derivative of the transfer function $F(\cdot)$. We can rewrite the above equation as:\begin{align}\frac{d\epsilon}{dt} \approx \frac{\epsilon}{\tau }[-1 + w F'(w\cdot r^* + I_{\text{ext}};a,\theta)] \end{align}That is, as in the linear system above, the value of$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau \qquad (4)$$determines whether the perturbation will grow or decay to zero, i.e., $\lambda$ defines the stability of the fixed point. This value is called the **eigenvalue** of the dynamical system. Exercise 4: Compute $dF$The derivative of the sigmoid transfer function is:\begin{align} \frac{dF}{dx} & = \frac{d}{dx} (1+\exp\{-a(x-\theta)\})^{-1} \\& = a\exp\{-a(x-\theta)\} (1+\exp\{-a(x-\theta)\})^{-2}. \qquad (5)\end{align}Let's now find the expression for the derivative $\displaystyle{\frac{dF}{dx}}$ in the following cell and plot it.
###Code
def dF(x, a, theta):
"""
Population activation function.
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
dFdx : the derivative of the population activation function at input x
"""
###########################################################################
# TODO for students: compute dFdx ##
  raise NotImplementedError("Student exercise: compute the derivative of F")
###########################################################################
# Calculate the derivative of the population activation function
dFdx = ...
return dFdx
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
# df = dF(x, pars['a'], pars['theta'])
# plot_dFdt(x, df)
# to_remove solution
def dF(x, a, theta):
"""
Population activation function.
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
dFdx : the derivative of the population activation function at input x
"""
# Calculate the derivative of the population activation function
dFdx = a * np.exp(-a * (x - theta)) * (1 + np.exp(-a * (x - theta)))**-2
return dFdx
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
df = dF(x, pars['a'], pars['theta'])
with plt.xkcd():
plot_dFdt(x, df)
###Output
_____no_output_____
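###Markdown
Before computing eigenvalues of the rate model, it may help to see the generic linear system from the section above in action. This added sketch (an illustration, not part of the original exercises) integrates $\frac{dx}{dt} = \lambda(x-b)$ with the Euler method for one negative and one positive $\lambda$ and overlays the analytical solution $x(t) = b + (x(0)-b)\text{e}^{\lambda t}$; all parameter values are arbitrary choices.
###Code
# Added sketch: perturbations decay for lambda < 0 and grow for lambda > 0.
b, x0, dt = 0.5, 0.51, 0.01  # fixed point, perturbed start, step (arbitrary)
t = np.arange(0, 5, dt)
plt.figure()
for lam in [-1., 1.]:
  x = np.zeros_like(t)
  x[0] = x0
  for k in range(t.size - 1):
    x[k + 1] = x[k] + dt * lam * (x[k] - b)  # Euler step
  plt.plot(t, x, label=f'numerical, lambda = {lam:+.0f}')
  plt.plot(t, b + (x0 - b) * np.exp(lam * t), 'k--', lw=1)  # analytical
plt.xlabel('t')
plt.ylabel('x(t)')
plt.legend()
plt.show()
###Output
_____no_output_____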
###Markdown
Exercise 5: Compute eigenvaluesAs discussed above, for the case with $w=5.0$ and $I_{\text{ext}}=0.5$, the system displays **three** fixed points. However, when we simulated the dynamics and varied the initial conditions $r_{\rm init}$, we could only obtain **two** steady states. In this exercise, we will now check the stability of each of the three fixed points by calculating the corresponding eigenvalues with the function `eig_single`. Check the sign of each eigenvalue (i.e., stability of each fixed point). How many of the fixed points are stable?Note that the expression for the eigenvalue at fixed point $r^*$ is$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau$$
###Code
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigenvalue of the linearized system
"""
#####################################################################
## TODO for students: compute eigenvalue and disable the error
  raise NotImplementedError("Student exercise: compute the eigenvalue")
######################################################################
# Compute the eigenvalue
eig = ...
return eig
# Find the eigenvalues for all fixed points of Exercise 2
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
# Uncomment below lines after completing the eig_single function.
# for i, fp in enumerate(x_fp):
#   eig_fp = eig_single(fp, **pars)
#   print(f'Fixed point{i + 1} at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
###Output
_____no_output_____
###Markdown
**SAMPLE OUTPUT**
```
Fixed point1 at 0.042 with Eigenvalue=-0.583
Fixed point2 at 0.447 with Eigenvalue=0.498
Fixed point3 at 0.900 with Eigenvalue=-0.626
```
###Code
# to_remove solution
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigenvalue of the linearized system
"""
# Compute the eigenvalue
eig = (-1. + w * dF(w * fp + I_ext, a, theta)) / tau
return eig
# Find the eigenvalues for all fixed points of Exercise 2
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
for i, fp in enumerate(x_fp):
  eig_fp = eig_single(fp, **pars)
  print(f'Fixed point{i + 1} at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
###Output
_____no_output_____
###Markdown
Think! Throughout the tutorial, we have assumed $w > 0$, i.e., we considered a single population of **excitatory** neurons. What do you think the behavior of a population of inhibitory neurons will be, i.e., where $w > 0$ is replaced by $w < 0$?
###Code
# to_remove explanation
"""
Discussion:
You can check this by going back to the second-to-last interactive demo and setting the
weight to w < 0. You will notice that the system has only one fixed point, and that
it lies close to zero. For these dynamics, the system will eventually converge
to zero. But try it out!
""";
###Output
_____no_output_____
###Markdown
--- Bonus 2: Noisy input drives the transition between two stable states Ornstein-Uhlenbeck (OU) processAs discussed in several previous tutorials, the OU process is usually used to generate a noisy input into the neuron. The OU input $\eta(t)$ follows: $$\tau_\eta \frac{d}{dt}\eta(t) = -\eta (t) + \sigma_\eta\sqrt{2\tau_\eta}\xi(t)$$Execute the following function `my_OU(pars, sig, myseed=False)` to generate an OU process.
###Code
# @title OU process `my_OU(pars, sig, myseed=False)`
# @markdown Make sure you execute this cell to visualize the noise!
def my_OU(pars, sig, myseed=False):
"""
A functions that generates Ornstein-Uhlenback process
Args:
pars : parameter dictionary
sig : noise amplitude
myseed : random seed. int or boolean
Returns:
I : Ornstein-Uhlenbeck input current
"""
# Retrieve simulation parameters
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
tau_ou = pars['tau_ou'] # [ms]
# set random seed
if myseed:
np.random.seed(seed=myseed)
else:
np.random.seed()
# Initialize
noise = np.random.randn(Lt)
I_ou = np.zeros(Lt)
I_ou[0] = noise[0] * sig
# generate OU
for it in range(Lt - 1):
I_ou[it + 1] = (I_ou[it]
+ dt / tau_ou * (0. - I_ou[it])
+ np.sqrt(2 * dt / tau_ou) * sig * noise[it + 1])
return I_ou
pars = default_pars_single(T=100)
pars['tau_ou'] = 1. # [ms]
sig_ou = 0.1
I_ou = my_OU(pars, sig=sig_ou, myseed=2020)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], I_ou, 'r')
plt.xlabel('t (ms)')
plt.ylabel(r'$I_{\mathrm{OU}}$')
plt.show()
###Output
_____no_output_____
###Markdown
Example: Up-Down transitionIn the presence of two or more fixed points, noisy inputs can drive a transition between the fixed points! Here, we stimulate an E population for 1,000 ms applying OU inputs.
###Code
# @title Simulation of an E population with OU inputs
# @markdown Make sure you execute this cell to spot the Up-Down states!
pars = default_pars_single(T=1000)
pars['w'] = 5.0
sig_ou = 0.7
pars['tau_ou'] = 1. # [ms]
pars['I_ext'] = 0.56 + my_OU(pars, sig=sig_ou, myseed=2020)
r = simulate_single(pars)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], r, 'b', alpha=0.8)
plt.xlabel('t (ms)')
plt.ylabel(r'$r(t)$')
plt.show()
###Output
_____no_output_____
###Markdown
Neuromatch Academy: Week 2, Day 4, Tutorial 1 Neuronal Network Dynamics: Neural Rate Models__Content creators:__ Qinglong Gu, Songtin Li, Arvind Kumar, John Murray, Julijana Gjorgjieva __Content reviewers:__ Maryam Vaziri-Pashkam, Ella Batty, Lorenzo Fontolan, Richard Gao, Spiros Chavlis, Michael Waskom --- Tutorial ObjectivesThe brain is a complex system, not because it is composed of a large number of diverse types of neurons, but mainly because of how neurons are connected to each other. The brain is indeed a network of highly specialized neuronal networks. The activity of a neural network constantly evolves in time. For this reason, neurons can be modeled as dynamical systems. The dynamical system approach is only one of the many modeling approaches that computational neuroscientists have developed (other points of view include information processing, statistical models, etc.). How the dynamics of neuronal networks affect the representation and processing of information in the brain is an open question. However, signatures of altered brain dynamics present in many brain diseases (e.g., in epilepsy or Parkinson's disease) tell us that it is crucial to study network activity dynamics if we want to understand the brain.In this tutorial, we will simulate and study one of the simplest models of biological neuronal networks. Instead of modeling and simulating individual excitatory neurons (e.g., LIF models that you implemented yesterday), we will treat them as a single homogeneous population and approximate their dynamics using a single one-dimensional equation describing the evolution of their average spiking rate in time.In this tutorial, we will learn how to build a firing rate model of a single population of excitatory neurons. **Steps:**- Write the equation for the firing rate dynamics of a 1D excitatory population.- Visualize the response of the population as a function of parameters such as threshold level and gain, using the frequency-current (F-I) curve.- Numerically simulate the dynamics of the excitatory population and find the fixed points of the system. - Investigate the stability of the fixed points by linearizing the dynamics around them. --- Setup
###Code
# Imports
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt # root-finding algorithm
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Helper functions
def plot_fI(x, f):
plt.figure(figsize=(6, 4)) # plot the figure
plt.plot(x, f, 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
def plot_dr_r(r, drdt, x_fps=None):
plt.figure()
plt.plot(r, drdt, 'k')
plt.plot(r, 0. * r, 'k--')
if x_fps is not None:
plt.plot(x_fps, np.zeros_like(x_fps), "ko", ms=12)
plt.xlabel(r'$r$')
plt.ylabel(r'$\frac{dr}{dt}$', fontsize=20)
plt.ylim(-0.1, 0.1)
def plot_dFdt(x, dFdt):
plt.figure()
plt.plot(x, dFdt, 'r')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('dF(x)', fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
--- Section 1: Neuronal network dynamics
###Code
# @title Video 1: Dynamic networks
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="p848349hPyw", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
###Output
_____no_output_____
###Markdown
Section 1.1: Dynamics of a single excitatory populationIndividual neurons respond by spiking. When we average the spikes of neurons in a population, we can define the average firing activity of the population. In this model, we are interested in how the population-averaged firing varies as a function of time and network parameters. Mathematically, we can describe the firing rate dynamic as:\begin{align}\tau \frac{dr}{dt} &= -r + F(w\cdot r + I_{\text{ext}}) \quad\qquad (1)\end{align}$r(t)$ represents the average firing rate of the excitatory population at time $t$, $\tau$ controls the timescale of the evolution of the average firing rate, $w$ denotes the strength (synaptic weight) of the recurrent input to the population, $I_{\text{ext}}$ represents the external input, and the transfer function $F(\cdot)$ (which can be related to f-I curve of individual neurons described in the next sections) represents the population activation function in response to all received inputs.To start building the model, please execute the cell below to initialize the simulation parameters.
###Code
# @markdown *Execute this cell to set default parameters for a single excitatory population model*
def default_pars_single(**kwargs):
pars = {}
# Excitatory parameters
pars['tau'] = 1. # Timescale of the E population [ms]
pars['a'] = 1.2 # Gain of the E population
pars['theta'] = 2.8 # Threshold of the E population
# Connection strength
pars['w'] = 0. # E to E, we first set it to 0
# External input
pars['I_ext'] = 0.
# simulation parameters
pars['T'] = 20. # Total duration of simulation [ms]
pars['dt'] = .1 # Simulation time step [ms]
pars['r_init'] = 0.2 # Initial value of E
# External parameters if any
pars.update(kwargs)
# Vector of discretized time points [ms]
pars['range_t'] = np.arange(0, pars['T'], pars['dt'])
return pars
###Output
_____no_output_____
###Markdown
You can now use:- `pars = default_pars_single()` to get all the parameters, and then you can execute `print(pars)` to check these parameters. - `pars = default_pars_single(T=T_sim, dt=time_step)` to set new simulation time and time step- To update an existing parameter dictionary, use `pars['New_para'] = value`Because `pars` is a dictionary, it can be passed to a function that requires individual parameters as arguments using `my_func(**pars)` syntax. Section 1.2: F-I curvesIn electrophysiology, a neuron is often characterized by its spike rate output in response to input currents. This is often called the **F-I** curve, denoting the output spike frequency (**F**) in response to different injected currents (**I**). We estimated this for an LIF neuron in yesterday's tutorial.The transfer function $F(\cdot)$ in Equation $1$ represents the gain of the population as a function of the total input. The gain is often modeled as a sigmoidal function, i.e., more input drive leads to a nonlinear increase in the population firing rate. The output firing rate will eventually saturate for high input values. A sigmoidal $F(\cdot)$ is parameterized by its gain $a$ and threshold $\theta$.$$ F(x;a,\theta) = \frac{1}{1+\text{e}^{-a(x-\theta)}} - \frac{1}{1+\text{e}^{a\theta}} \quad(2)$$The argument $x$ represents the input to the population. Note that the second term is chosen so that $F(0;a,\theta)=0$.Many other transfer functions (generally monotonic) can be also used. Examples are the rectified linear function $ReLU(x)$ or the hyperbolic tangent $tanh(x)$. Exercise 1: Implement F-I curve Let's first investigate the activation functions before simulating the dynamics of the entire population. In this exercise, you will implement a sigmoidal **F-I** curve or transfer function $F(x)$, with gain $a$ and threshold level $\theta$ as parameters.
###Code
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
#################################################
## TODO for students: compute f = F(x) ##
# Fill out function and remove
  raise NotImplementedError("Student exercise: implement the f-I function")
#################################################
# Define the sigmoidal transfer function f = F(x)
f = ...
return f
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
# f = F(x, pars['a'], pars['theta'])
# plot_fI(x, f)
# to_remove solution
def F(x, a, theta):
"""
Population activation function.
Args:
x (float): the population input
a (float): the gain of the function
theta (float): the threshold of the function
Returns:
float: the population activation response F(x) for input x
"""
# Define the sigmoidal transfer function f = F(x)
f = (1 + np.exp(-a * (x - theta)))**-1 - (1 + np.exp(a * theta))**-1
return f
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
f = F(x, pars['a'], pars['theta'])
with plt.xkcd():
plot_fI(x, f)
###Output
_____no_output_____
###Markdown
Interactive Demo: Parameter exploration of F-I curveHere's an interactive demo that shows how the F-I curve changes for different values of the gain and threshold parameters. How do the gain and threshold parameters affect the F-I curve?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def interactive_plot_FI(a, theta):
"""
Population activation function.
Args:
a : the gain of the function
theta : the threshold of the function
Returns:
plots the F-I curve with the given parameters
"""
# set the range of input
x = np.arange(0, 10, .1)
plt.figure()
plt.plot(x, F(x, a, theta), 'k')
plt.xlabel('x (a.u.)', fontsize=14)
plt.ylabel('F(x)', fontsize=14)
plt.show()
_ = widgets.interact(interactive_plot_FI, a=(0.3, 3, 0.3), theta=(2, 4, 0.2))
# to_remove explanation
"""
Discussion:
For the function we have chosen to model the F-I curve (eq 2),
- a determines the slope (gain) of the rising phase of the F-I curve
- theta determines the input at which the function F(x) reaches its mid-value (0.5).
That is, theta shifts the F-I curve along the horizontal axis.
For the neurons we are using in this tutorial:
- a controls the gain of the neuron population
- theta controls the threshold at which the neuron population starts to respond
""";
###Output
_____no_output_____
###Markdown
Section 1.3: Simulation scheme of E dynamics

Because $F(\cdot)$ is a nonlinear function, the exact solution of Equation $1$ cannot be determined analytically. Therefore, numerical methods must be used to find the solution. In practice, the derivative on the left-hand side of Equation $1$ can be approximated using the Euler method on a time-grid of stepsize $\Delta t$:

\begin{align}&\frac{dr}{dt} \approx \frac{r[k+1]-r[k]}{\Delta t} \end{align}

where $r[k] = r(k\Delta t)$. Thus,

$$\Delta r[k] = \frac{\Delta t}{\tau}[-r[k] + F(w\cdot r[k] + I_{\text{ext}}[k];a,\theta)]$$

Hence, Equation (1) is updated at each time step by:

$$r[k+1] = r[k] + \Delta r[k]$$
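To make the scheme concrete before the full simulator below, here is a small self-contained sketch of the same forward Euler update on a toy equation $dx/dt = -x/\tau$, whose exact solution is $x_0\,\text{e}^{-t/\tau}$; all values are arbitrary illustrations.

```python
# Forward Euler on dx/dt = -x / tau; compare against the exact exponential decay.
import numpy as np

tau, dt, x0 = 2.0, 0.1, 1.0
t = np.arange(0, 5, dt)
x = np.zeros(t.size)
x[0] = x0
for k in range(t.size - 1):
    dx = dt / tau * (-x[k])      # same update structure as Equation (1) with F = 0
    x[k + 1] = x[k] + dx

print(np.abs(x - x0 * np.exp(-t / tau)).max())  # small discretization error
```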
###Code
# @markdown *Execute this cell to enable the single population rate model simulator: `simulate_single`*
def simulate_single(pars):
"""
Simulate an excitatory population of neurons
Args:
pars : Parameter dictionary
Returns:
rE : Activity of excitatory population (array)
Example:
pars = default_pars_single()
r = simulate_single(pars)
"""
# Set parameters
tau, a, theta = pars['tau'], pars['a'], pars['theta']
w = pars['w']
I_ext = pars['I_ext']
r_init = pars['r_init']
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
# Initialize activity
r = np.zeros(Lt)
r[0] = r_init
I_ext = I_ext * np.ones(Lt)
# Update the E activity
for k in range(Lt - 1):
dr = dt / tau * (-r[k] + F(w * r[k] + I_ext[k], a, theta))
r[k+1] = r[k] + dr
return r
help(simulate_single)
###Output
_____no_output_____
###Markdown
Interactive Demo: Parameter Exploration of single population dynamics

Note that $w=0$, as in the default setting, means no recurrent input to the neuron population in Equation (1). Hence, the dynamics are entirely determined by the external input $I_{\text{ext}}$. Explore these dynamics in this interactive demo.

How does $r_{\text{sim}}(t)$ change with different $I_{\text{ext}}$ values? How does it change with different $\tau$ values? Investigate the relationship between $F(I_{\text{ext}}; a, \theta)$ and the steady value of $r(t)$. Note that $r_{\rm ana}(t)$ denotes the analytical solution - you will learn how this is computed in the next section.
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
# get default parameters
pars = default_pars_single(T=20.)
def Myplot_E_diffI_difftau(I_ext, tau):
# set external input and time constant
pars['I_ext'] = I_ext
pars['tau'] = tau
# simulation
r = simulate_single(pars)
# Analytical Solution
r_ana = (pars['r_init']
+ (F(I_ext, pars['a'], pars['theta'])
- pars['r_init']) * (1. - np.exp(-pars['range_t'] / pars['tau'])))
# plot
plt.figure()
plt.plot(pars['range_t'], r, 'b', label=r'$r_{\mathrm{sim}}$(t)', alpha=0.5,
zorder=1)
plt.plot(pars['range_t'], r_ana, 'b--', lw=5, dashes=(2, 2),
label=r'$r_{\mathrm{ana}}$(t)', zorder=2)
plt.plot(pars['range_t'],
F(I_ext, pars['a'], pars['theta']) * np.ones(pars['range_t'].size),
'k--', label=r'$F(I_{\mathrm{ext}})$')
plt.xlabel('t (ms)', fontsize=16.)
plt.ylabel('Activity r(t)', fontsize=16.)
plt.legend(loc='best', fontsize=14.)
plt.show()
_ = widgets.interact(Myplot_E_diffI_difftau, I_ext=(0.0, 10., 1.),
tau=(1., 5., 0.2))
# to_remove explanation
"""
Discussion:
Given the choice of F-I curve (eq 2) and dynamics of the neuron population (eq. 1)
the neurons have two fixed points or steady-state responses irrespective of the input.
- Weak inputs to the neurons eventually result in the activity converging to zero
- Strong inputs to the neurons eventually result in the activity converging to max value
The time constant tau does not affect the steady-state response, but it determines
the time the neurons take to reach their fixed point.
""";
###Output
_____no_output_____
###Markdown
Think!

Above, we have numerically solved a system driven by a positive input. Yet, $r_E(t)$ either decays to zero or reaches a fixed non-zero value.

- Why doesn't the solution of the system "explode" in a finite time? In other words, what guarantees that $r_E(t)$ stays finite?
- Which parameter would you change in order to increase the maximum value of the response?
###Code
# to_remove explanation
"""
Discussion:
1) As the F-I curve is bounded between zero and one, the system doesn't explode.
The F-I curve guarantees this property.
2) One way to increase the maximum response is to change the f-I curve. For
example, the ReLU is an unbounded function, and thus will increase the overall maximal
response of the network.
""";
###Output
_____no_output_____
###Markdown
--- Section 2: Fixed points of the single population system
###Code
# @title Video 2: Fixed point
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Ox3ELd1UFyo", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
###Output
_____no_output_____
###Markdown
As you varied the two parameters in the last Interactive Demo, you noticed that, while at first the system output quickly changes, with time, it reaches its maximum/minimum value and does not change anymore. The value eventually reached by the system is called the **steady state** of the system, or the **fixed point**. Essentially, in the steady states the derivative of the activity ($r$) with respect to time is zero, i.e. $\displaystyle \frac{dr}{dt}=0$. We can find the steady state of Equation (1) by setting $\displaystyle{\frac{dr}{dt}=0}$ and solving for $r$:

$$-r_{\text{steady}} + F(w\cdot r_{\text{steady}} + I_{\text{ext}};a,\theta) = 0, \qquad (3)$$

When it exists, the solution of Equation (3) defines a **fixed point** of the dynamical system in Equation (1). Note that if $F(x)$ is nonlinear, it is not always possible to find an analytical solution, but the solution can be found via numerical simulations, as we will do later.

From the Interactive Demo, one could also notice that the value of $\tau$ influences how quickly the activity will converge to the steady state from its initial value. In the specific case of $w=0$, we can also analytically compute the solution of Equation (1) (i.e., the thick blue dashed line) and deduce the role of $\tau$ in determining the convergence to the fixed point:

$$\displaystyle{r(t) = \big{[}F(I_{\text{ext}};a,\theta) -r(t=0)\big{]} (1-\text{e}^{-\frac{t}{\tau}})} + r(t=0)$$

We can now numerically calculate the fixed point with a root-finding algorithm.

Exercise 2: Visualization of the fixed points

When it is not possible to solve Equation (3) analytically, a graphical approach can be taken. To that end, it is useful to plot $\displaystyle{\frac{dr}{dt}}$ as a function of $r$. The values of $r$ for which the plotted function crosses zero on the y axis correspond to fixed points. Here, let us, for example, set $w=5.0$ and $I^{\text{ext}}=0.5$. From Equation (1), you can obtain

$$\frac{dr}{dt} = [-r + F(w\cdot r + I^{\text{ext}})]\,/\,\tau $$

Then, plot $dr/dt$ as a function of $r$, and check for the presence of fixed points.
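An equivalent graphical reading of Equation (3): fixed points are the intersections of the line $y=r$ with the curve $y=F(w\cdot r + I^{\text{ext}})$. The sketch below is self-contained and re-defines the sigmoid of Equation (2) locally; $a=1.2$ and $\theta=2.8$ are assumed illustrative values, not necessarily the defaults used in this notebook.

```python
# Graphical view of Equation (3): fixed points sit where y = r crosses
# y = F(w * r + I_ext). Self-contained sketch with assumed parameter values.
import numpy as np
import matplotlib.pyplot as plt

def F_sigmoid(x, a=1.2, theta=2.8):
    return 1 / (1 + np.exp(-a * (x - theta))) - 1 / (1 + np.exp(a * theta))

r = np.linspace(0, 1, 1000)
plt.plot(r, r, 'k--', label='y = r')
plt.plot(r, F_sigmoid(5.0 * r + 0.5), 'b', label='y = F(w r + I_ext)')
plt.xlabel('r')
plt.legend()
plt.show()
```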
###Code
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
#########################################################################
# TODO compute drdt and disable the error
raise NotImplementedError("Finish the compute_drdt function")
#########################################################################
# Calculate drdt
drdt = ...
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
# Uncomment to test your function
# drdt = compute_drdt(r, **pars)
# plot_dr_r(r, drdt)
# to_remove solution
def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):
"""Given parameters, compute dr/dt as a function of r.
Args:
r (1D array) : Average firing rate of the excitatory population
I_ext, w, a, theta, tau (numbers): Simulation parameters to use
other_pars : Other simulation parameters are unused by this function
Returns
drdt function for each value of r
"""
# Calculate drdt
drdt = (-r + F(w * r + I_ext, a, theta)) / tau
return drdt
# Define a vector of r values and the simulation parameters
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
with plt.xkcd():
plot_dr_r(r, drdt)
###Output
_____no_output_____
###Markdown
Exercise 3: Fixed point calculation

We will now find the fixed points numerically. To do so, we need to specify initial values ($r_{\text{guess}}$) for the root-finding algorithm to start from. From the plot of $\displaystyle{\frac{dr}{dt}}$ in Exercise 2 above, initial values can be chosen as a set of values close to where the line crosses zero on the y axis (the true fixed points).

The next cell defines three helper functions that we will use:

- `my_fp_single(r_guess, **pars)` uses a root-finding algorithm to locate a fixed point near a given initial value
- `check_fp_single(x_fp, **pars)` verifies that the values of $r_{\rm fp}$ for which $\displaystyle{\frac{dr}{dt}} = 0$ are true fixed points
- `my_fp_finder(r_guess_vector, **pars)` accepts an array of initial values and finds the corresponding fixed points, using the above two functions
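The idea behind these helpers can be seen in a tiny self-contained sketch with `scipy.optimize.root`: the solver typically converges to a root near its initial guess, which is why one guess per expected fixed point is supplied. The cubic below is just a toy function.

```python
# Root finding with scipy.optimize.root on a toy cubic with roots at 0, 0.5, 1.
from scipy.optimize import root

g = lambda r: r * (r - 0.5) * (r - 1.0)
for guess in [-0.2, 0.4, 1.2]:
    print(root(g, guess).x.item())  # typically converges to a nearby root
```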
###Code
# @markdown *Execute this cell to enable the fixed point functions*
def my_fp_single(r_guess, a, theta, w, I_ext, **other_pars):
"""
Calculate the fixed point through drE/dt=0
Args:
r_guess : Initial value used for scipy.optimize function
a, theta, w, I_ext : simulation parameters
Returns:
x_fp : value of fixed point
"""
# define the right hand of E dynamics
def my_WCr(x):
r = x
drdt = (-r + F(w * r + I_ext, a, theta))
y = np.array(drdt)
return y
x0 = np.array(r_guess)
x_fp = opt.root(my_WCr, x0).x.item()
return x_fp
def check_fp_single(x_fp, a, theta, w, I_ext, mytol=1e-4, **other_pars):
"""
Verify |dr/dt| < mytol
Args:
x_fp : value of fixed point
a, theta, w, I_ext: simulation parameters
mytol : tolerance, default as 10^{-4}
Returns :
Whether it is a correct fixed point: True/False
"""
# calculate Equation(3)
y = x_fp - F(w * x_fp + I_ext, a, theta)
# Here we set tolerance as 10^{-4}
return np.abs(y) < mytol
def my_fp_finder(pars, r_guess_vector, mytol=1e-4):
"""
Calculate the fixed point(s) through drE/dt=0
Args:
pars : Parameter dictionary
r_guess_vector : Initial values used for scipy.optimize function
mytol : tolerance for checking fixed point, default as 10^{-4}
Returns:
x_fps : values of fixed points
"""
x_fps = []
for r_guess in r_guess_vector:
x_fp = my_fp_single(r_guess, **pars)
if check_fp_single(x_fp, **pars, mytol=mytol):
x_fps.append(x_fp)
return x_fps
help(my_fp_finder)
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
#############################################################################
# TODO for students:
# Define initial values close to the intersections of drdt and y=0
# (How many initial values? Hint: How many times do the two lines intersect?)
# Calculate the fixed point with these initial values and plot them
#############################################################################
r_guess_vector = [...]
# Uncomment to test your values
# x_fps = my_fp_finder(pars, r_guess_vector)
# plot_dr_r(r, drdt, x_fps)
# to_remove solution
r = np.linspace(0, 1, 1000)
pars = default_pars_single(I_ext=0.5, w=5)
drdt = compute_drdt(r, **pars)
r_guess_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_guess_vector)
with plt.xkcd():
plot_dr_r(r, drdt, x_fps)
###Output
_____no_output_____
###Markdown
Interactive Demo: Fixed points as a function of recurrent and external inputs

You can now explore how the previous plot changes when the recurrent coupling $w$ and the external input $I_{\text{ext}}$ take different values. How does the number of fixed points change?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
def plot_intersection_single(w, I_ext):
# set your parameters
pars = default_pars_single(w=w, I_ext=I_ext)
# find fixed points
r_init_vector = [0, .4, .9]
x_fps = my_fp_finder(pars, r_init_vector)
# plot
r = np.linspace(0, 1., 1000)
drdt = (-r + F(w * r + I_ext, pars['a'], pars['theta'])) / pars['tau']
plot_dr_r(r, drdt, x_fps)
_ = widgets.interact(plot_intersection_single, w=(1, 7, 0.2),
I_ext=(0, 3, 0.1))
# to_remove explanation
"""
Discussion:
The fixed points of the single excitatory neuron population are determined by both
recurrent connections w and external input I_ext. In a previous interactive demo
we saw how the system showed two different steady states when w = 0. But when w
does not equal 0, for some range of w the system shows three fixed points (the middle
one being unstable), and the steady state depends on the initial conditions (i.e.,
r at time zero).
More on this will be explained in the next section.
""";
###Output
_____no_output_____
###Markdown
--- Summary

In this tutorial, we have investigated the dynamics of a rate-based single population of neurons.

We learned about:
- The effect of the input parameters and the time constant of the network on the dynamics of the population.
- How to find the fixed point(s) of the system.

Next, we cover two bonus, but important, concepts in dynamical system analysis and simulation. If you have time left, watch the next video and proceed to solve the exercises. You will learn:
- How to determine the stability of a fixed point by linearizing the system.
- How to add realistic inputs to our model.

--- Bonus 1: Stability of a fixed point
###Code
# @title Video 3: Stability of fixed points
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="KKMlWWU83Jg", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
###Output
_____no_output_____
###Markdown
Initial values and trajectories

Here, let us first set $w=5.0$ and $I_{\text{ext}}=0.5$, and investigate the dynamics of $r(t)$ starting with different initial values $r(0) \equiv r_{\text{init}}$. We will plot the trajectories of $r(t)$ with $r_{\text{init}} = 0.0, 0.1, 0.2, ..., 0.9$.
###Code
# @markdown Execute this cell to see the trajectories!
pars = default_pars_single()
pars['w'] = 5.0
pars['I_ext'] = 0.5
plt.figure(figsize=(8, 5))
for ie in range(10):
pars['r_init'] = 0.1 * ie # set the initial value
r = simulate_single(pars) # run the simulation
# plot the activity with given initial
plt.plot(pars['range_t'], r, 'b', alpha=0.1 + 0.1 * ie,
label=r'r$_{\mathrm{init}}$=%.1f' % (0.1 * ie))
plt.xlabel('t (ms)')
plt.title('Two steady states?')
plt.ylabel(r'$r$(t)')
plt.legend(loc=[1.01, -0.06], fontsize=14)
plt.show()
###Output
_____no_output_____
###Markdown
Interactive Demo: Dynamics as a function of the initial value

Let's now set $r_{\rm init}$ to a value of your choice in this demo. How does the solution change? What do you observe?
###Code
# @title
# @markdown Make sure you execute this cell to enable the widget!
pars = default_pars_single(w=5.0, I_ext=0.5)
def plot_single_diffEinit(r_init):
pars['r_init'] = r_init
r = simulate_single(pars)
plt.figure()
plt.plot(pars['range_t'], r, 'b', zorder=1)
plt.plot(0, r[0], 'bo', alpha=0.7, zorder=2)
plt.xlabel('t (ms)', fontsize=16)
plt.ylabel(r'$r(t)$', fontsize=16)
plt.ylim(0, 1.0)
plt.show()
_ = widgets.interact(plot_single_diffEinit, r_init=(0, 1, 0.02))
# to_remove explanation
"""
Discussion:
To better appreciate what is happening here, you should go back to the previous
interactive demo. Set w = 5 and I_ext = 0.5.
You will find that there are three fixed points of the system for these values of
w and I_ext. Now, choose the initial value in this demo and see in which direction
the system output moves. When r_init is in the vicinity of the leftmost fixed point,
it moves towards that fixed point. When r_init is in the vicinity of the
rightmost fixed point, it moves towards the rightmost fixed point.
""";
###Output
_____no_output_____
###Markdown
Stability analysis via linearization of the dynamics

Just like Equation $1$ in the case ($w=0$) discussed above, a generic linear system

$$\frac{dx}{dt} = \lambda (x - b),$$

has a fixed point for $x=b$. The analytical solution of such a system can be found to be:

$$x(t) = b + \big{(} x(0) - b \big{)} \text{e}^{\lambda t}.$$

Now consider a small perturbation of the activity around the fixed point: $x(0) = b + \epsilon$, where $|\epsilon| \ll 1$. Will the perturbation $\epsilon(t)$ grow with time or will it decay to the fixed point? The evolution of the perturbation with time can be written, using the analytical solution for $x(t)$, as:

$$\epsilon (t) = x(t) - b = \epsilon \text{e}^{\lambda t}$$

- if $\lambda < 0$, $\epsilon(t)$ decays to zero, $x(t)$ will still converge to $b$, and the fixed point is "**stable**".
- if $\lambda > 0$, $\epsilon(t)$ grows with time, $x(t)$ will leave the fixed point $b$ exponentially, and the fixed point is, therefore, "**unstable**".

Compute the stability of Equation $1$

Similar to what we did in the linear system above, in order to determine the stability of a fixed point $r^{*}$ of the excitatory population dynamics, we perturb Equation (1) around $r^{*}$ by $\epsilon$, i.e. $r = r^{*} + \epsilon$. We can plug this into Equation (1) and obtain the equation determining the time evolution of the perturbation $\epsilon(t)$:

\begin{align}\tau \frac{d\epsilon}{dt} \approx -\epsilon + w F'(w\cdot r^{*} + I_{\text{ext}};a,\theta) \epsilon \end{align}

where $F'(\cdot)$ is the derivative of the transfer function $F(\cdot)$. We can rewrite the above equation as:

\begin{align}\frac{d\epsilon}{dt} \approx \frac{\epsilon}{\tau }[-1 + w F'(w\cdot r^* + I_{\text{ext}};a,\theta)] \end{align}

That is, as in the linear system above, the value of

$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau \qquad (4)$$

determines whether the perturbation will grow or decay to zero, i.e., $\lambda$ defines the stability of the fixed point. This value is called the **eigenvalue** of the dynamical system.

Exercise 4: Compute $dF$

The derivative of the sigmoid transfer function is:

\begin{align} \frac{dF}{dx} & = \frac{d}{dx} (1+\exp\{-a(x-\theta)\})^{-1} \\& = a\exp\{-a(x-\theta)\} (1+\exp\{-a(x-\theta)\})^{-2}. \qquad (5)\end{align}

Let's now find the expression for the derivative $\displaystyle{\frac{dF}{dx}}$ in the following cell and plot it.
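To make the $\lambda$ criterion concrete, here is a small self-contained sketch that integrates the linear system $dx/dt = \lambda(x-b)$ from a slightly perturbed initial condition; all numbers are arbitrary illustrations.

```python
# A perturbation around the fixed point b decays for lambda < 0 (stable)
# and grows for lambda > 0 (unstable). Values are arbitrary illustrations.
def final_perturbation(lam, b=0.5, eps=1e-3, dt=0.01, T=5.0):
    x = b + eps
    for _ in range(int(T / dt)):
        x = x + dt * lam * (x - b)   # forward Euler on dx/dt = lam * (x - b)
    return x - b

print(final_perturbation(lam=-1.0))  # ~ eps * exp(-5): decays (stable)
print(final_perturbation(lam=+1.0))  # ~ eps * exp(+5): grows (unstable)
```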
###Code
def dF(x, a, theta):
"""
Derivative of the population activation function.
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
dFdx : the derivative of F(x) with respect to the input x
"""
###########################################################################
# TODO for students: compute dFdx ##
raise NotImplementedError("Student excercise: compute the deravitive of F")
###########################################################################
# Calculate the population activation
dFdx = ...
return dFdx
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
# Uncomment below to test your function
# df = dF(x, pars['a'], pars['theta'])
# plot_dFdt(x, df)
# to_remove solution
def dF(x, a, theta):
"""
Derivative of the population activation function.
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
dFdx : the derivative of F(x) with respect to the input x
"""
# Calculate the population activation
dFdx = a * np.exp(-a * (x - theta)) * (1 + np.exp(-a * (x - theta)))**-2
return dFdx
pars = default_pars_single() # get default parameters
x = np.arange(0, 10, .1) # set the range of input
df = dF(x, pars['a'], pars['theta'])
with plt.xkcd():
plot_dFdt(x, df)
###Output
_____no_output_____
###Markdown
Exercise 5: Compute eigenvalues

As discussed above, for the case with $w=5.0$ and $I_{\text{ext}}=0.5$, the system displays **three** fixed points. However, when we simulated the dynamics and varied the initial conditions $r_{\rm init}$, we could only obtain **two** steady states. In this exercise, we will now check the stability of each of the three fixed points by calculating the corresponding eigenvalues with the function `eig_single`. Check the sign of each eigenvalue (i.e., the stability of each fixed point). How many of the fixed points are stable?

Note that the expression of the eigenvalue at a fixed point $r^*$ is

$$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau$$
###Code
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigenvalue of the linearized system
"""
#####################################################################
## TODO for students: compute eigenvalue and disable the error
raise NotImplementedError("Student excercise: compute the eigenvalue")
######################################################################
# Compute the eigenvalue
eig = ...
return eig
# Find the eigenvalues for all fixed points of Exercise 2
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
# Uncomment below lines after completing the eig_single function.
# for i, fp in enumerate(x_fp):
#   eig_fp = eig_single(fp, **pars)
#   print(f'Fixed point{i+1} at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
###Output
_____no_output_____
###Markdown
**SAMPLE OUTPUT**

```
Fixed point1 at 0.042 with Eigenvalue=-0.583
Fixed point2 at 0.447 with Eigenvalue=0.498
Fixed point3 at 0.900 with Eigenvalue=-0.626
```
###Code
# to_remove solution
def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):
"""
Args:
fp : fixed point r_fp
tau, a, theta, w, I_ext : Simulation parameters
Returns:
eig : eigenvalue of the linearized system
"""
# Compute the eigenvalue
eig = (-1. + w * dF(w * fp + I_ext, a, theta)) / tau
return eig
# Find the eigenvalues for all fixed points of Exercise 2
pars = default_pars_single(w=5, I_ext=.5)
r_guess_vector = [0, .4, .9]
x_fp = my_fp_finder(pars, r_guess_vector)
for i, fp in enumerate(x_fp):
    eig_fp = eig_single(fp, **pars)
    print(f'Fixed point{i+1} at {fp:.3f} with Eigenvalue={eig_fp:.3f}')
###Output
_____no_output_____
###Markdown
Think!

Throughout the tutorial, we have assumed $w > 0$, i.e., we considered a single population of **excitatory** neurons. What do you think will be the behavior of a population of inhibitory neurons, i.e., where $w > 0$ is replaced by $w < 0$?
###Code
# to_remove explanation
"""
Discussion:
You can check this by going back to the second-to-last interactive demo and setting the
weight to w < 0. You will notice that the system has only one fixed point, and that
it is at zero. For these particular dynamics, the system will eventually converge
to zero. Try it out!
""";
###Output
_____no_output_____
###Markdown
--- Bonus 2: Noisy input drives the transition between two stable states

Ornstein-Uhlenbeck (OU) process

As discussed in several previous tutorials, the OU process is usually used to generate a noisy input into the neuron. The OU input $\eta(t)$ follows:

$$\tau_\eta \frac{d}{dt}\eta(t) = -\eta (t) + \sigma_\eta\sqrt{2\tau_\eta}\xi(t)$$

Execute the following function `my_OU(pars, sig, myseed=False)` to generate an OU process.
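The simulator in the next cell discretizes this SDE with an Euler-Maruyama step. Written out in isolation, one step looks like the sketch below (a standalone illustration with arbitrary values, mirroring the update used in `my_OU`):

```python
# One Euler-Maruyama step of the OU process, mirroring the update in my_OU:
# eta <- eta + dt/tau_eta * (0 - eta) + sig * sqrt(2*dt/tau_eta) * xi
import numpy as np

dt, tau_eta, sig = 0.1, 1.0, 0.5   # arbitrary illustrative values
eta = 0.0
for _ in range(5):
    xi = np.random.randn()         # standard Gaussian increment
    eta = eta + dt / tau_eta * (0. - eta) + sig * np.sqrt(2 * dt / tau_eta) * xi
    print(eta)
```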
###Code
# @title OU process `my_OU(pars, sig, myseed=False)`
# @markdown Make sure you execute this cell to visualize the noise!
def my_OU(pars, sig, myseed=False):
"""
A function that generates an Ornstein-Uhlenbeck process
Args:
pars : parameter dictionary
sig : noise amplitude
myseed : random seed. int or boolean
Returns:
I : Ornstein-Uhlenbeck input current
"""
# Retrieve simulation parameters
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
tau_ou = pars['tau_ou'] # [ms]
# set random seed
if myseed:
np.random.seed(seed=myseed)
else:
np.random.seed()
# Initialize
noise = np.random.randn(Lt)
I_ou = np.zeros(Lt)
I_ou[0] = noise[0] * sig
# generate OU
for it in range(Lt - 1):
I_ou[it + 1] = (I_ou[it]
+ dt / tau_ou * (0. - I_ou[it])
+ np.sqrt(2 * dt / tau_ou) * sig * noise[it + 1])
return I_ou
pars = default_pars_single(T=100)
pars['tau_ou'] = 1. # [ms]
sig_ou = 0.1
I_ou = my_OU(pars, sig=sig_ou, myseed=2020)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], I_ou, 'r')
plt.xlabel('t (ms)')
plt.ylabel(r'$I_{\mathrm{OU}}$')
plt.show()
###Output
_____no_output_____
###Markdown
Example: Up-Down transition

In the presence of two or more fixed points, noisy inputs can drive a transition between the fixed points! Here, we stimulate an E population for 1,000 ms by applying OU inputs.
###Code
# @title Simulation of an E population with OU inputs
# @markdown Make sure you execute this cell to spot the Up-Down states!
pars = default_pars_single(T=1000)
pars['w'] = 5.0
sig_ou = 0.7
pars['tau_ou'] = 1. # [ms]
pars['I_ext'] = 0.56 + my_OU(pars, sig=sig_ou, myseed=2020)
r = simulate_single(pars)
plt.figure(figsize=(10, 4))
plt.plot(pars['range_t'], r, 'b', alpha=0.8)
plt.xlabel('t (ms)')
plt.ylabel(r'$r(t)$')
plt.show()
###Output
_____no_output_____ |
4.analyze-components/1.visualize-reconstruction.ipynb | ###Markdown
Interpretation of Compression Models: Curating and Visualizing Reconstruction Loss

**Gregory Way 2018**

Compiling results of the z dimensionality sweep across algorithms and datasets. The data was generated by first running the script [2.ensemble-z-analysis/analysis.sh](https://github.com/greenelab/BioBombe/blob/master/2.ensemble-z-analysis/analysis.sh) as follows:

```bash
bash 2.ensemble-z-analysis/analysis.sh
```

Structure:

The notebook first curates all of the reconstruction loss results across datasets and outputs them in long format. Next, the results are visualized in a series of figures describing reconstruction loss.

Output:

1. Curated reconstruction results across datasets in long matrix format.
2. Reconstruction loss figures across algorithms and dimensions.
###Code
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(cowplot))
# Load helper functions
source(file.path("scripts", "util.R"))
###Output
_____no_output_____
###Markdown
TARGET Reconstruction Results
###Code
# Define the dataset to compile results for
dataset <- 'TARGET'
base_dir <- file.path("figures", dataset)
target_recon_cost_df <- compile_reconstruction_data(dataset)
recon_file <- file.path("results", paste0("reconstruction_", dataset, ".tsv"))
# Write results to file
readr::write_tsv(target_recon_cost_df, path = recon_file)
target_recon_gg <- plot_reconstruction_loss(target_recon_cost_df)
target_path <- file.path(base_dir, paste0("reconstruction_cost_", dataset))
save_png_pdf(p = target_recon_gg,
path_prefix = target_path,
height = 70,
width = 170)
target_recon_gg
# Compile VAE specific reconstruction loss
target_vae_recon_cost_df <- compile_reconstruction_data(dataset, data_focus = "vae")
target_vae_loss_gg <- plot_vae_training(target_vae_recon_cost_df)
target_path <- file.path(base_dir, paste0("vae_training_reconstruction_", dataset))
save_png_pdf(p = target_vae_loss_gg,
path_prefix = target_path,
height = 130,
width = 100)
target_vae_loss_gg
###Output
_____no_output_____
###Markdown
TCGA Reconstruction Results
###Code
# Define the dataset to compile results for
dataset <- 'TCGA'
base_dir <- file.path("figures", dataset)
tcga_recon_cost_df <- compile_reconstruction_data(dataset)
recon_file <- file.path("results", paste0("reconstruction_", dataset, ".tsv"))
# Write results to file
readr::write_tsv(tcga_recon_cost_df, path = recon_file)
tcga_recon_gg <- plot_reconstruction_loss(tcga_recon_cost_df)
tcga_path <- file.path(base_dir, paste0("reconstruction_cost_", dataset))
save_png_pdf(p = tcga_recon_gg,
path_prefix = tcga_path,
height = 70,
width = 170)
tcga_recon_gg
# Compile VAE specific reconstruction loss
tcga_vae_recon_cost_df <- compile_reconstruction_data(dataset, data_focus = "vae")
tcga_vae_loss_gg <- plot_vae_training(tcga_vae_recon_cost_df)
tcga_path <- file.path(base_dir, paste0("vae_training_reconstruction_", dataset))
save_png_pdf(p = tcga_vae_loss_gg,
path_prefix = tcga_path,
height = 130,
width = 100)
tcga_vae_loss_gg
###Output
_____no_output_____
###Markdown
Filter TCGA Results to Iterations that Converged
###Code
# Subset to iterations that may have converged
tcga_recon_cost_df <- tcga_recon_cost_df %>% dplyr::filter(reconstruction_cost < 4000)
tcga_recon_filter_gg <- plot_reconstruction_loss(tcga_recon_cost_df)
tcga_path <- file.path(base_dir, paste0("reconstruction_cost_subset_converge_", dataset))
save_png_pdf(p = tcga_recon_filter_gg,
path_prefix = tcga_path,
height = 70,
width = 170)
tcga_recon_filter_gg
# Subset to testing non-shuffled data
tcga_recon_cost_df <- tcga_recon_cost_df %>%
dplyr::filter(data_type == 'testing', shuffled == 'False')
tcga_recon_filter_test_gg <- plot_reconstruction_loss(tcga_recon_cost_df)
tcga_path <- file.path(base_dir, paste0("reconstruction_cost_subset_converge_testing_", dataset))
save_png_pdf(p = tcga_recon_filter_test_gg,
path_prefix = tcga_path,
height = 70,
width = 170)
tcga_recon_filter_test_gg
# Remove shuffled data and replot
tcga_vae_recon_cost_df <- tcga_vae_recon_cost_df %>% dplyr::filter(shuffle == "False")
tcga_vae_loss_filter_test_gg <- plot_vae_training(tcga_vae_recon_cost_df)
tcga_path <- file.path(base_dir, paste0("vae_training_reconstruction_subset_converge_", dataset))
save_png_pdf(p = tcga_vae_loss_filter_test_gg,
path_prefix = tcga_path,
height = 130,
width = 100)
tcga_vae_loss_filter_test_gg
###Output
_____no_output_____
###Markdown
GTEx Reconstruction Results
###Code
# Define the dataset to compile results for
dataset <- "GTEX"
base_dir <- file.path("figures", dataset)
gtex_recon_cost_df <- compile_reconstruction_data(dataset)
recon_file <- file.path("results", paste0("reconstruction_", dataset, ".tsv"))
# Write results to file
readr::write_tsv(gtex_recon_cost_df, path = recon_file)
gtex_recon_gg <- plot_reconstruction_loss(gtex_recon_cost_df)
gtex_path <- file.path(base_dir, paste0("reconstruction_cost_", dataset))
save_png_pdf(p = gtex_recon_gg,
path_prefix = gtex_path,
height = 70,
width = 170)
gtex_recon_gg
# Define the dataset to compile results for
gtex_vae_recon_cost_df <- compile_reconstruction_data(dataset, data_focus = "vae")
gtex_vae_loss_gg <- plot_vae_training(gtex_vae_recon_cost_df)
gtex_path <- file.path(base_dir, paste0("vae_training_reconstruction_", dataset))
save_png_pdf(p = gtex_vae_loss_gg,
path_prefix = gtex_path,
height = 130,
width = 100)
gtex_vae_loss_gg
###Output
_____no_output_____
###Markdown
Filter GTEx Results
###Code
# Subset to iterations that may have converged
gtex_recon_cost_df <- gtex_recon_cost_df %>% dplyr::filter(reconstruction_cost < 5000)
gtex_recon_filter_gg <- plot_reconstruction_loss(gtex_recon_cost_df)
gtex_path <- file.path(base_dir, paste0("reconstruction_cost_subset_converge_", dataset))
save_png_pdf(p = gtex_recon_filter_gg,
path_prefix = gtex_path,
height = 70,
width = 170)
gtex_recon_filter_gg
# Subset to testing non-shuffled data
gtex_recon_cost_df <- gtex_recon_cost_df %>%
dplyr::filter(data_type == 'testing', shuffled == 'False')
gtex_recon_filter_test_gg <- plot_reconstruction_loss(gtex_recon_cost_df)
gtex_path <- file.path(base_dir, paste0("reconstruction_cost_subset_converge_testing_", dataset))
save_png_pdf(p = gtex_recon_filter_test_gg,
path_prefix = gtex_path,
height = 70,
width = 170)
gtex_recon_filter_test_gg
# Remove shuffled data and replot
gtex_vae_recon_cost_df <- gtex_vae_recon_cost_df %>% dplyr::filter(shuffle == "False")
gtex_vae_loss_filter_test_gg <- plot_vae_training(gtex_vae_recon_cost_df)
gtex_path <- file.path(base_dir, paste0("vae_training_reconstruction_subset_converge_", dataset))
save_png_pdf(p = gtex_vae_loss_filter_test_gg,
path_prefix = gtex_path,
height = 130,
width = 100)
gtex_vae_loss_filter_test_gg
###Output
_____no_output_____
###Markdown
Create Supplementary Figure Describing Algorithm Loss across Dimensions
###Code
legend <- get_legend(target_recon_gg)
main_plot <- (
cowplot::plot_grid(
gtex_recon_filter_test_gg + ggtitle('GTEX') + xlab('') +
theme(plot.margin = margin(t = 0.5, r = 0.2, b = 0, l = 0.4),
legend.position = "none",
panel.grid.major = element_line(size = 0.25),
panel.grid.minor = element_line(size = 0.175)),
tcga_recon_filter_test_gg + ggtitle('TCGA') + xlab('') +
theme(plot.margin = margin(t = 0, r = 0.2, b = 0, l = 0.4),
legend.position = "none",
panel.grid.major = element_line(size = 0.25),
panel.grid.minor = element_line(size = 0.175)),
target_recon_gg + ggtitle('TARGET') +
theme(plot.margin = margin(t = 0, r = 0.2, b = 0.3, l = 0.4),
legend.position = "none",
panel.grid.major = element_line(size = 0.25),
panel.grid.minor = element_line(size = 0.175)),
labels = c("a", "b", "c"),
ncol = 1,
nrow = 3
)
)
main_plot = cowplot::plot_grid(main_plot, legend, rel_widths = c(1, 0.15), ncol = 2)
main_plot
main_path <- file.path("figures", "reconstruction_summary")
save_png_pdf(p = main_plot,
path_prefix = main_path,
height = 130,
width = 170)
###Output
_____no_output_____ |
examples/models.regression_example.ipynb | ###Markdown
`models.regression` example

---

Load packages
###Code
from transparentai.models import regression
from transparentai.datasets import load_boston
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Load & prepare data
###Code
data = load_boston()
X, Y = data.drop(columns='MEDV'), data['MEDV']
###Output
_____no_output_____
###Markdown
Split train test
###Code
X_train, X_valid, Y_train, Y_valid = train_test_split(X, Y, test_size=0.33, random_state=42)
###Output
_____no_output_____
###Markdown
Train regressor
###Code
regr = LinearRegression()
regr.fit(X_train, Y_train)
###Output
_____no_output_____
###Markdown
Prepare parameters
###Code
y_true = Y_train
y_pred = regr.predict(X_train)
y_true_valid = Y_valid
y_pred_valid = regr.predict(X_valid)
###Output
_____no_output_____
###Markdown
Use `regression.compute_metrics`

The list of usable metrics is in the documentation. You can also add custom metrics with a lambda:

```python
lambda y_true, y_pred: ...
```
###Code
metrics = ['MAE', 'MSE', 'RMSE', 'r2', lambda y_true, y_pred: sum(y_true-y_pred)]
regression.compute_metrics(y_true_valid, y_pred_valid, metrics)
###Output
_____no_output_____
###Markdown
Use `regression.plot_performance`

Only validation
###Code
regression.plot_performance(y_true_valid, y_pred_valid)
###Output
_____no_output_____
###Markdown
Train set and validation
###Code
regression.plot_performance(y_true, y_pred, y_true_valid, y_pred_valid)
###Output
_____no_output_____ |
notebooks/00_Intro/Interfacing_R.ipynb | ###Markdown
The next cell will get a ~65 MB data file 'sequence.index'; you only need to run the cell once
###Code
!rm sequence.index 2>/dev/null
!wget -nd ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/historical_data/former_toplevel/sequence.index -O sequence.index
###Output
--2019-09-26 09:59:06-- ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/historical_data/former_toplevel/sequence.index
=> ‘sequence.index’
Resolving ftp.1000genomes.ebi.ac.uk (ftp.1000genomes.ebi.ac.uk)... 193.62.192.8
Connecting to ftp.1000genomes.ebi.ac.uk (ftp.1000genomes.ebi.ac.uk)|193.62.192.8|:21... connected.
Logging in as anonymous ... Logged in!
==> SYST ... done. ==> PWD ... done.
==> TYPE I ... done. ==> CWD (1) /vol1/ftp/historical_data/former_toplevel ... done.
==> SIZE sequence.index ... 67069489
==> PASV ... done. ==> RETR sequence.index ... done.
Length: 67069489 (64M) (unauthoritative)
sequence.index 100%[===================>] 63.96M 2.07MB/s in 29s
2019-09-26 09:59:37 (2.24 MB/s) - ‘sequence.index’ saved [67069489]
###Markdown
Interfacing with R
###Code
# !conda install rpy2
!pip install rpy2
import os
from IPython.display import Image
import rpy2.robjects as robjects
# The code below relies on rpy2.robjects (robjects.r, robjects.r.assign, ...)
# and on the ggplot2 interface (ggplot2.ggplot, ggplot2.theme, ...)
import rpy2.robjects.lib.ggplot2 as ggplot2
from rpy2.robjects.functions import SignatureTranslatedFunction
import pandas as pd
import pandas.rpy.common as pd_common
read_delim = robjects.r('read.delim')
seq_data = read_delim('sequence.index', header=True, stringsAsFactors=False)
#In R:
# seq.data <- read.delim('sequence.index', header=TRUE, stringsAsFactors=FALSE)
print('This data frame has %d columns and %d rows' % (seq_data.ncol, seq_data.nrow))
print(seq_data.colnames)
#In R:
# print(colnames(seq.data))
# print(nrow(seq.data))
# print(ncol(seq.data))
print('Columns in Python %d ' % robjects.r.ncol(seq_data)[0])
#access some functions
as_integer = robjects.r('as.integer')
match = robjects.r.match
my_col = match('READ_COUNT', seq_data.colnames)[0] # Vector returned
print('Type of read count before as.integer: %s' % seq_data[my_col - 1].rclass[0])
seq_data[my_col - 1] = as_integer(seq_data[my_col - 1])
print('Type of read count after as.integer: %s' % seq_data[my_col - 1].rclass[0])
my_col = match('BASE_COUNT', seq_data.colnames)[0] # Vector returned
seq_data[my_col - 1] = as_integer(seq_data[my_col - 1])
my_col = match('CENTER_NAME', seq_data.colnames)[0]
seq_data[my_col - 1] = robjects.r.toupper(seq_data[my_col - 1])
robjects.r.assign('seq.data', seq_data)
robjects.r('print(c("Column names in R: ",colnames(seq.data)))')
robjects.r('seq.data <- seq.data[seq.data$WITHDRAWN==0, ]')
#Lets remove all withdrawn sequences
robjects.r("seq.data <- seq.data[, c('STUDY_ID', 'STUDY_NAME', 'CENTER_NAME', 'SAMPLE_ID', 'SAMPLE_NAME', 'POPULATION', 'INSTRUMENT_PLATFORM', 'LIBRARY_LAYOUT', 'PAIRED_FASTQ', 'READ_COUNT', 'BASE_COUNT', 'ANALYSIS_GROUP')]")
#Lets shorten the dataframe
#Population as factor
robjects.r('seq.data$POPULATION <- as.factor(seq.data$POPULATION)')
ggplot2.theme = SignatureTranslatedFunction(ggplot2.theme,
init_prm_translate = {'axis_text_x': 'axis.text.x'})
bar = ggplot2.ggplot(seq_data) + ggplot2.geom_bar() + ggplot2.aes_string(x='CENTER_NAME') + ggplot2.theme(axis_text_x=ggplot2.element_text(angle=90, hjust=1))
robjects.r.png('out.png')
bar.plot()
dev_off = robjects.r('dev.off')
dev_off()
Image(filename='out.png')
#Get Yoruba and CEU
robjects.r('yri_ceu <- seq.data[seq.data$POPULATION %in% c("YRI", "CEU") & seq.data$BASE_COUNT < 2E9 & seq.data$READ_COUNT < 3E7, ]')
yri_ceu = robjects.r('yri_ceu')
scatter = ggplot2.ggplot(yri_ceu) + ggplot2.aes_string(x='BASE_COUNT', y='READ_COUNT', shape='factor(POPULATION)', col='factor(ANALYSIS_GROUP)') + ggplot2.geom_point()
robjects.r.png('out.png')
scatter.plot()
dev_off = robjects.r('dev.off')
dev_off()
Image(filename='out.png')
pd_yri_ceu = pd_common.load_data('yri_ceu')
print(type(pd_yri_ceu))
pd_yri_ceu
del pd_yri_ceu['PAIRED_FASTQ']
no_paired = pd_common.convert_to_r_dataframe(pd_yri_ceu)
robjects.r.assign('no.paired', no_paired)
robjects.r("print(colnames(no.paired))")
###Output
[1] "STUDY_ID"            "STUDY_NAME"          "CENTER_NAME"
[4] "SAMPLE_ID"           "SAMPLE_NAME"         "POPULATION"
[7] "INSTRUMENT_PLATFORM" "LIBRARY_LAYOUT"      "READ_COUNT"
[10] "BASE_COUNT"          "ANALYSIS_GROUP"
###Markdown
The next cell will get a ~65 MB data file 'sequence.index'; you only need to run the cell once
###Code
!rm sequence.index 2>/dev/null
!wget -nd ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/historical_data/former_toplevel/sequence.index -O sequence.index
###Output
--2016-02-05 15:45:59-- ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/historical_data/former_toplevel/sequence.index
=> 'sequence.index'
Resolving ftp.1000genomes.ebi.ac.uk (ftp.1000genomes.ebi.ac.uk)... 193.62.192.8
Connecting to ftp.1000genomes.ebi.ac.uk (ftp.1000genomes.ebi.ac.uk)|193.62.192.8|:21... connected.
Logging in as anonymous ... Logged in!
==> SYST ... done. ==> PWD ... done.
==> TYPE I ... done. ==> CWD (1) /vol1/ftp/historical_data/former_toplevel ... done.
==> SIZE sequence.index ... 67069489
==> PASV ... done. ==> RETR sequence.index ... done.
Length: 67069489 (64M) (unauthoritative)
sequence.index 100%[=====================>] 63.96M 562KB/s in 2m 31s
2016-02-05 15:48:34 (434 KB/s) - 'sequence.index' saved [67069489]
###Markdown
Interfacing with R
###Code
import os
from IPython.display import Image
import rpy2.robjects as robjects
import rpy2.robjects.lib.ggplot2 as ggplot2
from rpy2.robjects.functions import SignatureTranslatedFunction
import pandas as pd
import pandas.rpy.common as pd_common
read_delim = robjects.r('read.delim')
seq_data = read_delim('sequence.index', header=True, stringsAsFactors=False)
#In R:
# seq.data <- read.delim('sequence.index', header=TRUE, stringsAsFactors=FALSE)
print('This data frame has %d columns and %d rows' % (seq_data.ncol, seq_data.nrow))
print(seq_data.colnames)
#In R:
# print(colnames(seq.data))
# print(nrow(seq.data))
# print(ncol(seq.data))
print('Columns in Python %d ' % robjects.r.ncol(seq_data)[0])
#access some functions
as_integer = robjects.r('as.integer')
match = robjects.r.match
my_col = match('READ_COUNT', seq_data.colnames)[0] # Vector returned
print('Type of read count before as.integer: %s' % seq_data[my_col - 1].rclass[0])
seq_data[my_col - 1] = as_integer(seq_data[my_col - 1])
print('Type of read count after as.integer: %s' % seq_data[my_col - 1].rclass[0])
my_col = match('BASE_COUNT', seq_data.colnames)[0] # Vector returned
seq_data[my_col - 1] = as_integer(seq_data[my_col - 1])
my_col = match('CENTER_NAME', seq_data.colnames)[0]
seq_data[my_col - 1] = robjects.r.toupper(seq_data[my_col - 1])
robjects.r.assign('seq.data', seq_data)
robjects.r('print(c("Column names in R: ",colnames(seq.data)))')
robjects.r('seq.data <- seq.data[seq.data$WITHDRAWN==0, ]')
#Lets remove all withdrawn sequences
robjects.r("seq.data <- seq.data[, c('STUDY_ID', 'STUDY_NAME', 'CENTER_NAME', 'SAMPLE_ID', 'SAMPLE_NAME', 'POPULATION', 'INSTRUMENT_PLATFORM', 'LIBRARY_LAYOUT', 'PAIRED_FASTQ', 'READ_COUNT', 'BASE_COUNT', 'ANALYSIS_GROUP')]")
#Lets shorten the dataframe
#Population as factor
robjects.r('seq.data$POPULATION <- as.factor(seq.data$POPULATION)')
ggplot2.theme = SignatureTranslatedFunction(ggplot2.theme,
init_prm_translate = {'axis_text_x': 'axis.text.x'})
bar = ggplot2.ggplot(seq_data) + ggplot2.geom_bar() + ggplot2.aes_string(x='CENTER_NAME') + ggplot2.theme(axis_text_x=ggplot2.element_text(angle=90, hjust=1))
robjects.r.png('out.png')
bar.plot()
dev_off = robjects.r('dev.off')
dev_off()
Image(filename='out.png')
#Get Yoruba and CEU
robjects.r('yri_ceu <- seq.data[seq.data$POPULATION %in% c("YRI", "CEU") & seq.data$BASE_COUNT < 2E9 & seq.data$READ_COUNT < 3E7, ]')
yri_ceu = robjects.r('yri_ceu')
scatter = ggplot2.ggplot(yri_ceu) + ggplot2.aes_string(x='BASE_COUNT', y='READ_COUNT', shape='factor(POPULATION)', col='factor(ANALYSIS_GROUP)') + ggplot2.geom_point()
robjects.r.png('out.png')
scatter.plot()
dev_off = robjects.r('dev.off')
dev_off()
Image(filename='out.png')
pd_yri_ceu = pd_common.load_data('yri_ceu')
print(type(pd_yri_ceu))
pd_yri_ceu
del pd_yri_ceu['PAIRED_FASTQ']
no_paired = pd_common.convert_to_r_dataframe(pd_yri_ceu)
robjects.r.assign('no.paired', no_paired)
robjects.r("print(colnames(no.paired))")
###Output
[1] "STUDY_ID"            "STUDY_NAME"          "CENTER_NAME"
[4] "SAMPLE_ID"           "SAMPLE_NAME"         "POPULATION"
[7] "INSTRUMENT_PLATFORM" "LIBRARY_LAYOUT"      "READ_COUNT"
[10] "BASE_COUNT"          "ANALYSIS_GROUP"
|
3 Autograd.ipynb | ###Markdown
Automatic differentiation

> Automatic differentiation (AD) is software to transform code for one function into code for the derivative of the function.

Example with autograd
###Code
import matplotlib.pyplot as plt
%matplotlib inline
from pylab import rcParams
rcParams['figure.figsize'] = 8, 5
import autograd.numpy as np
from autograd import grad, elementwise_grad
def f(x):
return 3 * x * x * x + 2
f(10.0)
fprime = grad(f)
fprime
fprime(10.0)
X = np.linspace(-5, 5, 100)
plt.plot(X, f(X), label="f")
plt.plot(X, [fprime(x) for x in X], label="f'")
plt.legend()
plt.grid(True)
###Output
_____no_output_____
###Markdown
Autograd is a different library, but its idea is similar.

> We (autograd) want to provide a third way: just write down the loss function using a standard numerical library like Numpy, and Autograd will give you its gradient.

[Under the hood](https://github.com/HIPS/autograd/blob/master/docs/tutorial.md#whats-going-on-under-the-hood):

> To compute the gradient, Autograd first has to **record every transformation that was applied to the input** as it was turned into the output of your function. To do this, Autograd wraps functions (using the function primitive) so that when they're called, they add themselves to a list of operations performed. Autograd's core has a **table mapping** these wrapped primitives to their corresponding gradient functions.

AD with PyTorch
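As a quick sanity check of this mechanism, here is a minimal self-contained sketch comparing an autograd derivative with the hand-derived one for $g(x) = x\sin(x)$ (the alias `anp` is used locally to avoid shadowing the notebook's `np`):

```python
# Autograd's derivative of g(x) = x * sin(x) should match the analytic
# derivative g'(x) = sin(x) + x * cos(x).
import autograd.numpy as anp
from autograd import elementwise_grad

def g(x):
    return x * anp.sin(x)

gprime = elementwise_grad(g)
xs = anp.linspace(-3, 3, 7)
print(anp.allclose(gprime(xs), anp.sin(xs) + xs * anp.cos(xs)))  # True
```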
###Code
import torch
from torch.autograd import Variable
from torchviz import make_dot
###Output
_____no_output_____
###Markdown
What is a Variable?

It wraps a tensor and records the operations applied to it.

Simple example
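A minimal sketch (using the same legacy PyTorch `Variable` API as this notebook) of the pieces a Variable carries:

```python
# A Variable wraps a tensor (.data), tracks its gradient (.grad) and how it
# was produced (.grad_fn). Legacy PyTorch API, matching this notebook.
import torch
from torch.autograd import Variable

v = Variable(torch.Tensor([4.0]), requires_grad=True)
print(v.data)     # the wrapped tensor
print(v.grad)     # None until backward() has been called
print(v.grad_fn)  # None: v is a leaf created by the user, not an operation
```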
###Code
# Define variables
# Placeholder? Is the grad a fucntion?
x = Variable(torch.Tensor([1]), requires_grad=True)
w = Variable(torch.Tensor([2]), requires_grad=True)
b = Variable(torch.Tensor([3]), requires_grad=True)
# Build a computational graph.
y = w * x + b # y = 2 * x + 3
y
y.grad_fn
make_dot(y)
# Compute gradients.
y.backward()
# Print out the gradients. y = w * x + b # y = 2 * x + 3
print(x.grad) # x.grad = 2
print(w.grad) # w.grad = 1
print(b.grad) # b.grad = 1
###Output
Variable containing:
2
[torch.FloatTensor of size 1]
Variable containing:
1
[torch.FloatTensor of size 1]
Variable containing:
1
[torch.FloatTensor of size 1]
###Markdown
$$\frac{\partial y}{\partial x} = w, \qquad \frac{\partial y}{\partial w} = x, \qquad \frac{\partial y}{\partial b} = 1$$

With x, w, b = [1, 2, 3], the gradients are: 2, 1, 1.
###Code
[x.grad.data, w.grad.data, b.grad.data]
###Output
_____no_output_____
###Markdown
Interpretation:
* If we increase x by 1, our output will increase by 2.
* If we increase w by 1, our output will increase by 1.
* If we increase b by 1, our output will increase by 1.

A quick numeric check follows below.
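Here is a small finite-difference sketch confirming the first of these numbers (plain Python, using the same x=1, w=2, b=3 as above):

```python
# Numerically check dy/dx = w = 2 for y = w * x + b via a finite difference.
h = 1e-4
y_plus = 2 * (1 + h) + 3   # y at x + h
y_base = 2 * 1 + 3         # y at x
print((y_plus - y_base) / h)  # ~2.0, matching x.grad computed by autograd above
```

Five node example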
###Code
x = Variable(torch.ones(2, 2), requires_grad=True)
x
y = x + 2
y
###Output
_____no_output_____
###Markdown
y is the result of an operation, so it has a `grad_fn`
###Code
y.grad_fn # references a Function that has created the Variable
# More operations ....
z = y * y * 3
z
z.grad_fn
# Not just arithmetic.
out = z.mean()
out
# Run backpropagation.
out.backward() # out.backward(torch.Tensor([1.0])) # with respect to some scalar value.
print(x.grad)
###Output
Variable containing:
4.5000 4.5000
4.5000 4.5000
[torch.FloatTensor of size 2x2]
###Markdown
> When using autograd, the forward pass of your network will define a computational graph; nodes in the graph will be Tensors, and edges will be functions that produce output Tensors from input Tensors. Backpropagating through this graph then allows you to easily compute gradients.
###Code
make_dot(out)
x.grad
###Output
_____no_output_____ |
CSC14119 - Introduction to Data Science/Group Project 02 - Data Understanding and Analysis/Source/19120301_19120315_19120331_19120454.ipynb | ###Markdown
Introduction to Data Science - Group Project 2

Member list

|Full name|Student ID|Task|
| :------ | :---: | :--------- |
|Võ Thành Nam|19120301|Section III|
|Lương Ánh Nguyệt|19120315|Section IV|
|Phạm Lưu Mỹ Phúc|19120331|Section II|
|Bùi Quang Bảo|19120454|Section I|

Detailed contents and task assignment:

I. The relationship between a track's duration, genre, and how well it is liked (19120454 - Bùi Quang Bảo)
* Does a longer track duration mean the track is more liked? If not, is there an "ideal duration" that gives a track a higher chance of being liked?
* Which music genres are popular on SoundCloud? Which genre is most liked by the majority of listeners? Between Hip Hop and Pop, which genre is more favored?

II. The relationship between play count, likes, and genre (19120331 - Phạm Lưu Mỹ Phúc)
* Does a track that is played many times also get many likes?
* Is the genre with the most tracks also the most played genre?

III. The relationship between a user's follower count and the average number of likes per playlist of that user (19120301 - Võ Thành Nam)
* Does the number of followers say anything about the quality of a user's playlists, and if so, what? (Quality here does not mean technical quality, but how much people like the playlist)

IV. The relationship between upload time, engagement, and play count of a track (19120315 - Lương Ánh Nguyệt)
* Does a track uploaded long ago have more engagement than a recently uploaded one? Is there a time window in which uploaded tracks get more engagement than tracks uploaded at other times?
* Does a track that is reposted (shared) a lot get more plays thanks to that?

Import libraries
###Code
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import numpy as np
import pandas as pd
from datetime import datetime
import sys
sys.executable
###Output
_____no_output_____
###Markdown
**I. The relationship between a track's duration, genre, and how well it is liked**

1. Does a longer track duration mean the track is more liked? If not, is there an "ideal duration" that gives a track a higher chance of being liked?
2. Which music genres are popular on SoundCloud? Which genre is most liked by the majority of listeners? Between Hip Hop and Pop, which genre is more favored?

Author: Bùi Quang Bảo - 19120454

Data used: tracks.csv (from project 1, API method)

The conclusions in this section apply to the sample collected from the SoundCloud music platform; they are not guaranteed to reflect the whole SoundCloud platform in particular or the whole music market in general.

Import the data
###Code
df = pd.read_csv('data/tracks.csv')
df.head()
###Output
_____no_output_____
###Markdown
Keep only the attributes we will use: "genre", "duration" and "likes_count".
###Code
df = df.loc[:,["genre", "duration", "likes_count"]]
df.fillna(df.median(), inplace=True)
plt.style.use('ggplot')
###Output
_____no_output_____
###Markdown
The relationship between a track's duration and its likes_count

Let's look at the scatter plot of the two attributes, duration and likes_count:
###Code
# Scatter Plot: duration vs likes_count
fig = plt.figure(figsize=(12,8))
plt.scatter(df["duration"], df["likes_count"], c='steelblue')
plt.xlabel('Duration (millisecond)')
plt.ylabel('Likes Count')
plt.show()
###Output
_____no_output_____
###Markdown
With the scatter plot above, it is very hard to observe anything and we cannot draw any conclusion. The reason is that there are tracks with unusually large durations, which drag the tail of the plot far to the right.

Solution: we will remove the outliers.

Outliers, in this case, are defined as:
* Tracks with unusually large durations (for example [this track](https://soundcloud.com/pineapplealien/rain-sounds-sound-of-rain-mp3-nature-soundsrain-sound-white-noise-for-relaxation-meditation), which is not a song but simply the sound of rain for relaxation, over one hour long)
* Tracks with unusually short durations that are not songs but sound effects (SFX) or audio uploaded by users (SoundCloud lets users upload their own audio, so there are tracks that are not songs, just something a user uploaded "for fun" and forgot to delete)

Method: Interquartile Range Method (IQR)

Reference: https://online.stat.psu.edu/stat200/lesson/3/3.2
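A minimal sketch of the IQR rule on a toy sample (the values are arbitrary; the actual filtering on `df` happens in the next cell):

```python
# IQR outlier bounds on a toy sample; 100 is an obvious outlier here.
import numpy as np

toy = np.array([1., 2., 2., 3., 3., 3., 4., 4., 5., 100.])
q1, q3 = np.percentile(toy, 25), np.percentile(toy, 75)
cut_off = 1.5 * (q3 - q1)
lower, upper = q1 - cut_off, q3 + cut_off
print(toy[(toy >= lower) & (toy <= upper)])  # the outlier 100 is removed
```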
###Code
# Remove outliers: Interquartile Range Method (IQR)
Q1, Q3 = df["duration"].quantile(0.25), df["duration"].quantile(0.75)
IQR = Q3 - Q1
cut_off = IQR * 1.5
lower, upper = Q1 - cut_off, Q3 + cut_off
df = df[~((df["duration"] < lower) | (df["duration"] > upper))]
# Scatter Plot: duration vs likes_count
fig = plt.figure(figsize=(12,8))
plt.scatter(df["duration"], df["likes_count"], c='steelblue')
plt.xlabel('Duration (millisecond)')
plt.ylabel('Likes Count')
plt.show()
###Output
_____no_output_____
###Markdown
Với biểu đồ phân tán của dữ liệu sau khi loại bỏ outliers, chúng ta có thể đưa ra một vài quan sát và nhận xét như sau:* **Bài hát có độ dài lớn hơn không có nghĩa là bài hát đó được yêu thích hơn.** Điều này đã trả lời cho câu hỏi: "Liệu độ dài bài hát (duration) càng lớn thì bài hát đó có càng được yêu thích?"* Phần lớn những bài hát có số lượng like lớn có độ dài nằm trong khoảng từ 100000ms (1 phút 40 giây) đến 300000ms (5 phút). Bằng quan sát, chúng ta nhận thấy rằng **những bài hát có số lượng like lớn có độ dài xoay quanh 200000ms (3 phút 20 giây)**. Điều này khá đúng với thực tế khi mà những bài hát mới ra *thường* dài từ 3 đến 4 phút. Tuy nhiên, vẫn **không** thể kết luận đây là "độ dài lý tưởng" để một bài hát được yêu thích hơn, bởi vì một bài hát hay còn phụ thuộc vào rất nhiều yếu tố khác, và chúng ta chỉ đang xét 1 mẫu các bài hát trên nền tảng SoundCloud (So với Spotify thì SoundCloud có rất nhiều nghệ sĩ tự do, không chuyên, cũng có thể ảnh hưởng đến kết luận này). Sự phổ biến (số lượng bài hát) và độ ưa chuộng (số lượng likes) đối với các thể loại nhạc Hãy cùng xem qua các thể loại âm nhạc (genre):
###Code
print(f"Số lượng thể loại: {len(df['genre'].unique())}")
print("Danh sách thể loại:")
print(df['genre'].unique())
###Output
Number of genres: 208
List of genres:
[nan 'Lo-Fi Hip Hop' 'rain' 'Lo-fi' 'beats' 'Drum & Bass' 'Hip Hop'
'fast and furious' 'Hip-hop & Rap' 'Comedy' 'Pop' 'Dance & EDM' 'Phonk'
'Jazz' 'Electro Swing' 'Alternative Rock' 'Indie' 'Electronic' 'PHONK'
'KREEP' 'Metal' 'Soundtrack' 'Rock' 'experimental' 'Country'
'Rap/Hip Hop' 'Classical' 'Rap' 'R&B' 'R & B' 'R&B/Soul' 'NC' 'Lexington'
'meme' 'Undertale - Last Breath' 'Trailer Music' 'Hardstyle' 'cover'
'Speaker Knockerz' 'All' 'two against one' 'Irish Drill Music'
'irishdrillmusic' 'K-Pop' 'R&B & Soul' 'calvin' 'martin solveig'
'Progressive House' 'Dance' 'steveaoki' 'House' 'XO' 'good vibes'
'Indie Trap' 'Real Music' 'Anime' 'Tech House' 'funk' 'Baile do ana'
'Rap/Hip-Hop' 'The Neighbourhood, ' 'Hip-hop/Rap' '"the system' 'other'
'Ballad' 'driven to tears' 'Light' 'Shere Khan' 'Música do Mundo'
'Reggae' 'Pop-Folk' 'Funk' 'Singer Songwriter' 'STP' 'BlueOysterCult '
'Melody' 'GalaxyHop' 'Dance/HipHop' 'country' 'rock n roll'
'Vocal/Nostalgia' 'ingrid michaelson' 'Nightcore' 'AM'
'Folk & Singer-Songwriter' 'Alternative' 'Editing' 'EDITED' 'edited'
'Trap' 'Oldschool' 'BEACH HOUSE' 'meditacion' 'Blues' 'dillonfrancis'
'Classical Piano' 'Brazilian' 'Trap Brasileiro ®' 'FLUXO' 'Chill House'
'NstyTdw' 'Tropical House' 'Latin' 'Cumbia' 'Techno' 'Piano' 'Banda'
'Sonta' 'Electro\\ House' 'Electro House' 'Pain, Pulse, & Energy'
'Gaspare Music' 'TRIPLESIXDELETE' 'SoFaygo' 'TGOD' 'Hip Hop/Rap' 'hiphop'
'Nirvana drum cover' 'Hard Rock' 'Thunderstruck' 'acdc' 'Hiphoprnb'
'Dancehall' 'Bryson Tiller' 'uk rap' 'gfn' 'HafaAdai' 'Dubstep'
'DJ BRENIN' 'Hiphop' 'Lofihiphop' 'Rap e Hip Hop' 'Nocaute'
'eu sosseguei' 'Sertanejo' 'Piseiro' 'Technology' 'piseiro' 'Pagode'
'spirithiphop' 'Religion & Spirituality' 'Rach44.5' 'Ambient'
'BLACKLIVESMATTER' 'lofihiphop' 'Lofi Hiphop' 'CYBERTRAP' 'NAchaT'
'World' 'Anime Rap' 'John' 'pagcor 5' 'melanie martinez' 'Latina'
'Reggaeton' 'lilpeep' '2019' 'Cash Out' 'ca$h out' 'circle' 'Go-Go'
'SEHARUSNYA AKU - MAULANA WIJAYA [Official Music Soundcloud]'
'Radio Pasisia Online ( R P O ) Pemersatu' 'Hip-Hop' 'TRAP' 'مهرجان'
'Mutiara Hikmah' 'nostalgia' 'Kpop' 'TREAD' 'lilgreaf' 'Candomblé'
'Umbanda' 'Sagaranna' 'candomblé' 'Brazilian Music' 'Relegious'
'Jazz, R&B, Classic Rock' 'Soul' 'electronic dance music' 'FDT'
'Mazzy Star' 'sami' 'Jesus Adrian Romero' 'MenungguPagi' 'Pokemon Gold'
'Deep House' 'pop' 'Psytrance' 'hi-tech' 'Trance' 'tropical' 'Psystyle'
'Minimal\\ Tech House' 'Hitech' 'Hi-Tech' '1WAYCAMP' 'Toosii'
'Acoustic Gospel' 'FUNK' 'Dalãma Produções' 'funk/mg' 'Country Rap'
'Inspirational' 'Elevation' 'Contemporary Christian' 'ادم' 'Hiphop/rap']
###Markdown
We are facing a problem: SoundCloud lets users define the genre of their own tracks, so the sample contains many "strange" genres (such as "Real Music" or "Pokemon Gold").

In addition, some genres appear under several spellings, for example "Hip-hop & Rap" and "Rap/Hip Hop".

Therefore, within the scope of this project, we will only consider the 10 most popular genres, ignore semantics in the computation, and treat genres such as "Hip-hop & Rap" and "Hip Hop" as different genres.
###Code
# Value Count
popular_genres_and_count = df['genre'].value_counts()[:10]
popular_genres = popular_genres_and_count.index.tolist()
# Pie plot: Genres with count
fig, ax = plt.subplots()
fig.set_figwidth(7)
fig.set_figheight(7)
ax.pie(list(popular_genres_and_count), labels=popular_genres, autopct='%1.1f%%', startangle=90)
ax.axis('equal')
plt.title('Popular Music Genres', y=1.04)
plt.show()
###Output
_____no_output_____
###Markdown
We can clearly see that **Hip-hop (broadly speaking) is quite popular and accounts for a large share of the tracks**. However, does more popular mean more liked/favored? Here, we will measure how well-liked a genre is through the median of the "likes_count" attribute over all tracks belonging to that genre.
###Code
loved_genres = df[["genre", "likes_count"]][df["genre"].isin(popular_genres)].groupby('genre').agg('median').sort_values(
by = "likes_count",
ascending = False
)
loved_genres = loved_genres.reset_index()
loved_genres = loved_genres.rename({'genre': 'Music Genre', 'likes_count': 'Median Likes Count'}, axis=1)
loved_genres
###Output
_____no_output_____
###Markdown
We can make a few observations:

* Although Pop is popular, with more tracks than Rock and Country, it is not as well liked.
* **Hip-hop (broadly speaking) is both the most popular and the best liked.**
* We can also see that **between Hip Hop and Pop, Hip Hop is the more favored genre**.

Let's look at the scatter plot again, but this time tracks in the Hip Hop genre are shown in red and tracks in the Pop genre in blue:
###Code
colors = []
for lab, row in df.iterrows() :
if row["genre"] == "Hip Hop":
colors.append("crimson") # red
elif row["genre"] == "Pop":
colors.append("mediumblue") # blue
else:
colors.append("None")
# Scatter Plot: duration vs likes_count, red is Hip Hop and blue is Pop
fig = plt.figure(figsize=(12,8))
plt.scatter(df["duration"], df["likes_count"], c=colors)
plt.xlabel('Duration (millisecond)')
plt.ylabel('Likes Count')
red_dot = mlines.Line2D([], [], color='crimson', marker='o', linestyle='None', markersize=6, label='Genre: Hip Hop')
blue_dot = mlines.Line2D([], [], color='mediumblue', marker='o', linestyle='None', markersize=6, label='Genre: Pop')
plt.legend(handles=[red_dot, blue_dot], loc = 'upper right')
plt.show()
###Output
_____no_output_____
###Markdown
Consistent with the conclusion above, tracks in the Hip Hop genre (red) tend to be liked more than tracks in the Pop genre (blue).

Conclusions:

* Does a track with a greater duration get liked more?
  * Answer: No. A longer track does not mean a better-liked track.
* Is there an "ideal duration" that raises a track's chance of being liked?
  * Answer: Possibly. In the sample at hand, tracks with many likes cluster around 3 minutes 20 seconds. However, the sample is rather small, so this is not a firm conclusion.
* Which music genres are popular on SoundCloud?
  * Answer: Hip Hop (broadly speaking) is the most popular, followed by genres such as Pop, Country, Rock, Dance & EDM, ...
* Which genre is best liked by the majority of listeners?
  * Answer: Hip Hop (broadly speaking) is the most favored by listeners.
* Between Hip Hop and Pop, which genre is more favored?
  * Answer: Hip Hop.

**II. The relationship between play count, favorability, and genre**

Questions:

1. Does a track that is played many times have many likes?
2. Does the genre with the most tracks get listened to the most?

Performed by: Phạm Lưu Mỹ Phúc - 19120331

Data used: tracks.csv (from project 1, collected via the API method). The data here only reflects the collected sample; it cannot represent the whole SoundCloud platform or the music market.

Steps to perform:
* Load the data
* Preprocess the data: handle rows with missing values
* Analyze the data and visualize it
###Code
df = pd.read_csv('data/tracks.csv')
print(df.shape)
df.head()
df = df.loc[:,["genre", "playback_count", "likes_count"]]
df = df.dropna()
df.shape
###Output
_____no_output_____
###Markdown
Dropping the rows containing NaN values removes 241 rows (~25%); the number of remaining rows is still acceptable, so we analyze the remaining data without imputing values. In this part we only consider the correlation between play count, favorability, and genre, so we only need the three columns `genre`, `playback_count`, and `likes_count`.
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
The data types of the three columns we are considering are already suitable for analysis.
###Code
df.describe().round(1)
###Output
_____no_output_____
###Markdown
General remarks on the data columns:

- The lowest play count is 0 and the highest is ~241 million. The dataset we are considering is thus spread quite widely and covers the full range of users, from ordinary listeners to singers.
- The lowest like count is 0 and the highest is ~3 million likes.

The relationship between a track's play count and its favorability
###Code
plt.style.use('ggplot')
fig = plt.figure(figsize=(12,8))
plt.scatter(df["playback_count"], df["likes_count"], c='steelblue')
plt.xlabel('Playbacks Count')
plt.ylabel('Likes Count')
plt.show()
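# A quick way to quantify the visual impression above (a sketch, not part of
# the original analysis): the Pearson correlation between plays and likes.
print(df[['playback_count', 'likes_count']].corr())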
###Output
_____no_output_____
###Markdown
From the scatter plot above, we can make a few remarks:

* The more a track is played, the more likes it gets. There is no case of a track with many plays but few likes.
* Play counts concentrate mostly below **5000000**.
* Not many tracks have more than **15000000** plays.

The relationship between genre and a track's play count

First, let's find out which genres appear in this dataset.
###Code
df['genre'].unique()
###Output
_____no_output_____
###Markdown
Because genres are user-defined, many odd genres appear, for example "two against one" or "shere khan". Therefore, we take only the 10 most popular genres (those with the most tracks) for the analysis, and answer the question: do the popular genres get more plays?
###Code
genre_count = df['genre'].value_counts()[:10].index.tolist()
df['genre'].value_counts()[:10]
###Output
_____no_output_____
###Markdown
The Hip Hop genre in second place can also be seen as part of the Hip Hop & Rap genre, so we can say that Hip Hop & Rap clearly dominates the remaining genres. This is only a remark on one sample collected from SoundCloud; it cannot reflect the entire music market today.

Next, we gauge how much a genre is listened to through the mean of the playback_count attribute over all tracks belonging to that genre, and answer the question of whether a popular genre gets more plays.
###Code
df_popular_genre = df.loc[df['genre'].isin(genre_count)]
df_genre_count=df_popular_genre['genre'].value_counts()
df_popular_genre = df_popular_genre.filter(items=['genre','playback_count']).groupby(by="genre").mean()
genre_playbackcount_df = pd.concat([df_popular_genre,df_genre_count],axis=1).sort_values(by=['playback_count'],ascending=False)
genre_playbackcount_df.rename(columns={'genre':'song_count'},inplace=True)
genre_playbackcount_df.astype({'playback_count': 'int64', 'song_count': 'int64'})
###Output
_____no_output_____
###Markdown
We can make the following remarks:

* The Hip Hop & Rap genre still dominates: it has both the most tracks and a play count far ahead of the remaining genres.
* The Alternative Rock genre has the fewest tracks, yet the second-highest play count, far ahead of the remaining genres.
* Conversely, the Pop genre has the second-most tracks but the lowest play count.

Next, we visualize the data we just analyzed on the pie chart below to make observations easier.
###Code
# Value Count
popular_genres_and_count = genre_playbackcount_df['playback_count']
popular_genres = genre_playbackcount_df.index.tolist()
# Pie plot: Genres with count
fig, ax = plt.subplots()
fig.set_figwidth(9)
fig.set_figheight(9)
ax.pie(list(popular_genres_and_count), labels=popular_genres, autopct='%1.1f%%', startangle=90)
ax.axis('equal')
plt.title('Playback', y=1.04)
plt.show()
###Output
_____no_output_____
###Markdown
Conclusions

1. The more a track is played, the more likes it gets:
 * Tracks with more listeners have more likes. There is no case of a track with many plays but few likes.
2. After comparing the number of tracks (popularity) and the play counts of the genres, we can conclude:
 * The Hip Hop & Rap genre has both the most tracks and the most plays.
 * A popular genre (with many tracks) does not necessarily get many plays.
 * There is a contrast: several genres, though more popular, get fewer plays than less popular genres (with fewer tracks).

**III. In this part, we explore the relationship between a user's follower count and the average number of likes per playlist of that user.**

The question asked here is:
- Does the follower count say anything about the quality of a user's playlists, and if so, what? (Quality here does not mean professional quality, but how much people like the playlists.)

Dataset used: users.csv

To measure playlist quality, we compute the average number of likes across the playlists.
> Average likes = total likes across playlists / total number of playlists.

Steps to perform:
- Load the data and examine general information about it (whether data is missing or abnormal, how it is distributed, ...)
- Preprocess: filter out abnormal or erroneous data, if any.
- Analyze by examining correlations and summary statistics, and draw charts showing those correlations.

**Load the data**
###Code
users_df=pd.read_csv('data/users.csv')
users_df.head()
###Output
_____no_output_____
###Markdown
We need the attributes `followers_count, playlist_likes_count, playlist_count`, so we take only these data columns.
###Code
data=users_df.loc[:,['followers_count','playlist_likes_count','playlist_count']]
###Output
_____no_output_____
###Markdown
Check whether the data types are all numeric.
###Code
data.dtypes
###Output
_____no_output_____
###Markdown
Let's look at some information about the data we retrieved, including the total counts, the number of missing values, ...
###Code
percent_missing = data[data.describe().columns.tolist()].isnull().sum()
nume_col_info_df=percent_missing.to_frame(name='missing_count').transpose()
nume_col_info_df=nume_col_info_df.append(data.describe()).round(1)
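# note: DataFrame.append is deprecated in newer pandas; pd.concat([nume_col_info_df, data.describe()]) is the modern equivalent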
nume_col_info_df
###Output
_____no_output_____
###Markdown
**General remarks on the data columns**

As we can see, none of the 3 columns has missing data.

The lowest and highest follower counts observed are 0 and 733. The 75th percentile of followers_count is 1. We therefore predict that, in the dataset at hand, most users are ordinary users, not artists or celebrities - the kind of user who would have a far larger follower count.

In the playlist_count column, the lowest playlist count is 1, so we can directly divide the `playlist_likes_count` column by the `playlist_count` column to get the average number of likes without any further processing.
###Code
# Add the average likes-per-playlist column
data['LikesPerPlaylist']=data['playlist_likes_count']/data['playlist_count']
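# If playlist_count could ever be 0, the division would need a guard, e.g. (sketch):
#   data['playlist_likes_count'] / data['playlist_count'].replace(0, np.nan)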
fig = plt.figure(figsize=(12,6))
plt.scatter(data['followers_count'], data['LikesPerPlaylist'], c='steelblue')
plt.xlabel('Followers')
plt.ylabel('Likes/Playlist')
plt.xticks(range(0,max(data['followers_count']),50))
plt.show()
###Output
_____no_output_____
###Markdown
Now, let's examine whether ordinary users can create quality playlists for the community.

As the chart above shows, there are not many users with a large follower count (tentatively taken as >50 followers); even by eye one can see there are only about 7. This indicates that the collected dataset contains no artists or celebrities, who could attract more listeners; almost all are ordinary users with very low follower counts. So our prediction about the users in this dataset was correct.

Since there are not many users with large follower counts, we will drop these users and consider those with fewer than 50 followers.
###Code
normal_users=data.drop(data[data['followers_count']>=50].index)
###Output
_____no_output_____
###Markdown
Next, we will plot the correlation between `followers_count` and `LikesPerPlaylist`.
###Code
fig = plt.figure(figsize=(12,6))
plt.scatter(normal_users['followers_count'], normal_users['LikesPerPlaylist'], c='steelblue')
plt.xlabel('Followers')
plt.ylabel('Likes/Playlist')
plt.xticks(range(0,max(normal_users['followers_count']),1))
plt.show()
###Output
_____no_output_____
###Markdown
We can now see more clearly that the users' follower counts in fact concentrate below 10, and these users' LikesPerPlaylist also concentrates below 20. There are only a few exceptions with a large Likes/Playlist value.

We will remove the outliers of each group using the 2-STD method and compute the mean of each group.
###Code
plt.figure(figsize=(12,6))
# Remove the outliers
def is_outlier(s):
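    # Flag values more than 2 standard deviations away from the group mean (a z-score rule)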
lower_limit = s.mean() - (s.std() * 2)
upper_limit = s.mean() + (s.std() * 2)
# Q1, Q3 = s.quantile(0.25), s.quantile(0.75)
# IQR = Q3 - Q1
# cut_off = IQR * 1.5
# lower_limit, upper_limit = Q1 - cut_off, Q3 + cut_off
return ~s.between(lower_limit, upper_limit)
stat = normal_users[~normal_users.groupby('followers_count')['LikesPerPlaylist'].apply(is_outlier)]
# Compute the mean of each group
stat=stat.groupby('followers_count').mean().reset_index()
# Plot the chart
plt.xticks(stat['followers_count'])
plt.xlabel('Followers')
plt.ylabel('Average Likes/Playlist')
plt.bar(stat['followers_count'],stat['LikesPerPlaylist']);
###Output
_____no_output_____
###Markdown
At this point we can draw a few conclusions:

- Overall, the number of likes per playlist in the under-50-follower group is quite low, and the differences are almost negligible as the follower count varies within this range.
- In the dataset at hand, there is only a single exception (35 likes/playlist at 2 followers) showing that a user with few followers can still have a large average number of likes on their playlists (compared with the baseline of the user group under consideration).
- For users with more followers than the group considered above, we cannot yet conclude whether their likes/playlist is higher.

Returning to the original question, combined with the analyzed data, we can only answer that at low follower counts the playlist quality is low, but we cannot conclude how the follower count influences this.

**IV. The relationship between posting time, engagement, and a track's play count**

Questions:

1. Does a track posted long ago have more engagement than a track posted recently? Is there a period of time such that tracks posted then have higher engagement than tracks posted at other times?
2. Does being reposted (shared) a lot help a track get more plays?

* A track's *engagement* here is computed as the sum of its *likes*, *comments*, *reposts (shares)*, and *playbacks (plays)*.

Performed by: Lương Ánh Nguyệt - 19120315

Data used: tracks.csv (data collected via the API in project 1)

Load the data
###Code
df = pd.read_csv('data/tracks.csv')
df.head()
###Output
_____no_output_____
###Markdown
1. The relationship between posting time and a track's engagement

Preprocessing

* In this part, we only take the data from the columns `created_at`, `comment_count`, `likes_count`, `playback_count`, and `reposts_count`.
###Code
data = df.loc[:,['created_at','comment_count','likes_count','playback_count','reposts_count']]
data
###Output
_____no_output_____
###Markdown
* Check whether there are any missing values.
###Code
data.isna().sum()
###Output
_____no_output_____
###Markdown
Since missing values make up only a very small portion (3 out of 1000 rows), we simply drop the rows that contain them.
###Code
data.dropna(inplace=True)
###Output
_____no_output_____
###Markdown
* Next, we check the data types of the columns.
###Code
data.dtypes
###Output
_____no_output_____
###Markdown
To make the data easier to work with and nicer to look at, we should convert the `created_at` column to the `datetime` type (keeping only the date, not the time of day), and the `comment_count`, `likes_count`, `playback_count` columns to the `int` type.
###Code
data = data.astype({'created_at':np.datetime64, 'comment_count':np.int64, 'likes_count':np.int64, 'playback_count':np.int64})
data.created_at = data.created_at.dt.normalize()
data.dtypes
###Output
_____no_output_____
###Markdown
* We do not use the `comment_count`, `likes_count`, `playback_count`, `reposts_count` columns individually; instead we sum them to measure the track's engagement, stored in an `interactions` column.
* The `time` column stores the number of days from when the track was created (created_at) to the present (taken as `12-12-2021`, the time this project was carried out).
###Code
data['time'] = (np.datetime64('2021-12-12') - data['created_at']).dt.days
data['interactions'] = [0]*len(data.index)
for col in ['comment_count', 'likes_count', 'playback_count', 'reposts_count']:
data['interactions'] = data['interactions'].add(data[col])
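# equivalent one-liner: data['interactions'] = data[['comment_count', 'likes_count', 'playback_count', 'reposts_count']].sum(axis=1)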
data
###Output
_____no_output_____
###Markdown
Data analysis

The scatter plot of the two attributes `time` and `interactions` looks as follows:
###Code
plt.figure(figsize=(12,8))
plt.scatter(data['time'], data['interactions'])
plt.xlabel('Time (days)')
plt.ylabel('Interactions')
plt.show()
###Output
_____no_output_____
###Markdown
From the scatter plot above, we can answer question 1 and draw a few remarks:

* A track that was posted long ago does **not** necessarily have more engagement than a recently posted track.
* Tracks posted a very long time ago (10-11 years ago, i.e., more than ~4000 days) have very little engagement. Perhaps SoundCloud was not yet popular back then, so there were not many users around to engage. And by now those tracks are so old that few people know them, making it hard for their engagement to grow.
* **The tracks with outstandingly high engagement were posted 3-5 years ago (~1000-2000 days)**. This seems plausible: by that time the technology had become widespread, more users meant more engagement, and that engagement accumulates over time.
* However, we also **cannot claim** that tracks posted 3-5 years ago get more engagement than tracks posted at other times, because the tracks with clearly higher engagement are only a few, and engagement also depends on the track's popularity, how good it is, and so on.
* **Most tracks have fewer than about 40 million interactions, regardless of when they were posted** (the points cluster densely and stretch along the bottom of the chart).

2. The relationship between a track's reposts (shares) and playbacks (plays)

Preprocessing

In this part, we take the data from the `playback_count` and `reposts_count` columns for analysis.
###Code
data = df.loc[:,['playback_count','reposts_count']]
data
###Output
_____no_output_____
###Markdown
* Check whether there are any missing values.
###Code
data.isna().sum()
###Output
_____no_output_____
###Markdown
Only 1 value is missing, so we simply drop that row.
###Code
data.dropna(inplace=True)
###Output
_____no_output_____
###Markdown
* Looking over the data, we see that the `playback_count` column is of type `float`. We should convert it to `integer` so the values display more cleanly.
###Code
data = data.astype({'playback_count':np.int64})
data.dtypes
###Output
_____no_output_____
###Markdown
Data analysis

* Scatter plot of *reposts (shares)* against *playbacks (plays)*:
###Code
plt.figure(figsize=(12,8))
plt.scatter(data['reposts_count'], data['playback_count'])
plt.xlabel('Repost')
plt.ylabel('Playback')
plt.show()
###Output
_____no_output_____
###Markdown
From the chart above, we see that some tracks have an extremely high repost count. This squeezes the chart toward the left, making it hard to observe and comment on.

Therefore, we need to remove the outliers. Here, we use the **interquartile range** method (IQR).
###Code
# IQR
Q1 = data['reposts_count'].quantile(0.25)
Q3 = data['reposts_count'].quantile(0.75)
IQR = Q3 - Q1
x = IQR * 1.5
repost_start = Q1 - x
repost_end = Q3 + x
# Remove the outliers
data = data[((data['reposts_count']>=repost_start) & (data['reposts_count']<=repost_end))]
###Output
_____no_output_____
###Markdown
* The new chart after removing the outliers:
###Code
plt.figure(figsize=(12,8))
plt.scatter(data['reposts_count'], data['playback_count'])
plt.xlabel('Repost')
plt.ylabel('Playback')
plt.show()
###Output
_____no_output_____ |
testscripts/testscript_layout.ipynb | ###Markdown
Test script

This is a basic layout of a test script to run on recommendation datasets. Make sure the dataset passed in is in CSR format, with the first 3 entries of each row being [row_id, col_id, rating, ...] and ratings ranging between 1 and 5. Otherwise, you might need to make changes in the original Jupyter notebook (finalcode.ipynb). If you just want to give a CSR dataset as input (with ratings between 1 and 5) and evaluate the performance of THE algorithm, then please use the test script testscript_std.

Running the main Jupyter notebook which has all the functions defined. Make sure the path is correct in the next cell.
###Code
# if this way of importing another jupyter notebook fails for you
# then you can use any one of the many methods described here:
# https://stackoverflow.com/questions/20186344/ipynb-import-another-ipynb-file
%run '../src/finalcode.ipynb'
#from datetime import datetime
#datetime.now().time() # (hour, min, sec, microsec)
###Output
_____no_output_____
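###Markdown
For reference, here is a minimal sketch of the expected input shape (a toy example with hypothetical values, not a real dataset): triplets of [row_id, col_id, rating] assembled into a SciPy CSR matrix.
###Code
import numpy as np
from scipy.sparse import csr_matrix

# Toy [row_id, col_id, rating] triplets standing in for a real dataset file.
raw = np.array([[0, 0, 5.0],
                [0, 2, 3.0],
                [1, 1, 4.0]])
rows, cols, vals = raw[:, 0].astype(int), raw[:, 1].astype(int), raw[:, 2]
ratings = csr_matrix((vals, (rows, cols)))  # ratings stay in the 1-5 range
print(ratings.toarray())
###Output
_____no_output_____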
###Markdown
Setting constants

Read and prepare the dataset

Make predictions using THE algorithm

Step 1: Sample splitting

Step 2: Expanding the Neighborhood

Step 3: Computing the distances

Step 4: Averaging datapoints to produce final estimate

Evaluate the predictions
###Code
#datetime.now().time() # (hour, min, sec, microsec)
###Output
_____no_output_____ |
LSI.ipynb | ###Markdown
###Code
#import modules
import os.path
from gensim import corpora
from gensim.models import LsiModel
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from gensim.models.coherencemodel import CoherenceModel
import matplotlib.pyplot as plt
def load_data(path,file_name):
"""
Input : path and file_name
Purpose: loading text file
Output : list of paragraphs/documents and
             titles (the first 100 characters of each document are treated as its title)
"""
documents_list = []
titles=[]
with open( os.path.join(path, file_name) ,"r") as fin:
for line in fin.readlines():
text = line.strip()
documents_list.append(text)
print("Total Number of Documents:",len(documents_list))
titles.append( text[0:min(len(text),100)] )
return documents_list,titles
def preprocess_data(doc_set):
"""
    Input  : document list
Purpose: preprocess text (tokenize, removing stopwords, and stemming)
Output : preprocessed text
"""
# initialize regex tokenizer
tokenizer = RegexpTokenizer(r'\w+')
# create English stop words list
en_stop = set(stopwords.words('english'))
# Create p_stemmer of class PorterStemmer
p_stemmer = PorterStemmer()
# list for tokenized documents in loop
texts = []
# loop through document list
for i in doc_set:
# clean and tokenize document string
raw = i.lower()
tokens = tokenizer.tokenize(raw)
# remove stop words from tokens
stopped_tokens = [i for i in tokens if not i in en_stop]
# stem tokens
stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]
# add tokens to list
texts.append(stemmed_tokens)
return texts
def prepare_corpus(doc_clean):
    """
    Input  : clean documents
    Purpose: create the term dictionary of our corpus and convert the list of documents (corpus) into a Document-Term Matrix
    Output : term dictionary and Document-Term Matrix
    """
    # Creating the term dictionary of our corpus, where every unique term is assigned an index.
    dictionary = corpora.Dictionary(doc_clean)
    # Converting the list of documents (corpus) into a Document-Term Matrix using the dictionary prepared above.
    doc_term_matrix = [dictionary.doc2bow(doc) for doc in doc_clean]
    return dictionary, doc_term_matrix
def create_gensim_lsa_model(doc_clean,number_of_topics,words):
"""
Input : clean document, number of topics and number of words associated with each topic
Purpose: create LSA model using gensim
Output : return LSA model
"""
dictionary,doc_term_matrix=prepare_corpus(doc_clean)
# generate LSA model
lsamodel = LsiModel(doc_term_matrix, num_topics=number_of_topics, id2word = dictionary) # train model
print(lsamodel.print_topics(num_topics=number_of_topics, num_words=words))
return lsamodel
def compute_coherence_values(dictionary, doc_term_matrix, doc_clean, stop, start=2, step=3):
"""
Input : dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
stop : Max num of topics
purpose : Compute c_v coherence for various number of topics
Output : model_list : List of LSA topic models
             coherence_values : Coherence values corresponding to the LSA model with the respective number of topics
"""
coherence_values = []
model_list = []
for num_topics in range(start, stop, step):
# generate LSA model
        model = LsiModel(doc_term_matrix, num_topics=num_topics, id2word=dictionary)  # train a model for the current number of topics
model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=doc_clean, dictionary=dictionary, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
return model_list, coherence_values
def plot_graph(doc_clean,start, stop, step):
dictionary,doc_term_matrix=prepare_corpus(doc_clean)
model_list, coherence_values = compute_coherence_values(dictionary, doc_term_matrix,doc_clean,
stop, start, step)
# Show graph
x = range(start, stop, step)
plt.plot(x, coherence_values)
plt.xlabel("Number of Topics")
plt.ylabel("Coherence score")
plt.legend(("coherence_values"), loc='best')
plt.show()
# LSA Model
number_of_topics=7
words=10
document_list,titles=load_data("","articles.txt")
clean_text=preprocess_data(document_list)
model=create_gensim_lsa_model(clean_text,number_of_topics,words)
start,stop,step=2,12,1
plot_graph(clean_text,start,stop,step)
###Output
_____no_output_____ |
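###Markdown
As a final illustration (a sketch added here, not part of the original notebook), a new document can be projected into the trained LSA topic space using the same preprocessing helpers defined above:
###Code
# Project an unseen document into the LSA space built above.
# Note: the dictionary must be the same one used for training, and
# prepare_corpus is deterministic for the same input, so rebuilding it is safe.
new_doc = "machine learning methods for text analysis"
new_tokens = preprocess_data([new_doc])[0]
dictionary, _ = prepare_corpus(clean_text)
bow = dictionary.doc2bow(new_tokens)
print(model[bow])  # (topic_id, weight) pairs for the document
###Output
_____no_output_____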
Auditoriski/DigitRecognition.ipynb | ###Markdown
Handwritten digit recognition

* http://yann.lecun.com/exdb/mnist/
###Code
from mnist import MNIST
mndata = MNIST('./Resources/Data/numbers')
mndata.gz = True
train_images, train_labels = mndata.load_training()
test_images, test_labels = mndata.load_testing()
print(test_labels[1])
print(MNIST.display(test_images[0]))
###Output
2
............................
............................
............................
............................
............................
............................
............................
............................
......@@@@@@................
...........@@@@@@@@@@.......
...................@@.......
...................@@.......
..................@@........
..................@@........
.................@@.........
.................@..........
................@@..........
................@...........
...............@@...........
..............@@............
.............@@@............
.............@@.............
............@@..............
............@@..............
...........@@@..............
...........@@@..............
...........@@...............
............................
###Markdown
DecisionTrees
###Code
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import numpy as np
import pandas as pd
clf = DecisionTreeClassifier()
clf.fit(train_images, train_labels)
pred_labels = clf.predict(test_images)
print('Accuracy Score on train data: ', accuracy_score(y_true=np.array(train_labels), y_pred=clf.predict(train_images)))
print('Accuracy Score on test data: ', accuracy_score(y_true=np.array(test_labels), y_pred=pred_labels))
###Output
Accuracy Score on train data: 1.0
Accuracy Score on test data: 0.8765
###Markdown
MultinomialNB
###Code
clf = MultinomialNB()
clf.fit(train_images, train_labels)
pred_labels = clf.predict(test_images)
print('Accuracy Score on train data: ', accuracy_score(y_true=np.array(train_labels), y_pred=clf.predict(train_images)))
print('Accuracy Score on test data: ', accuracy_score(y_true=np.array(test_labels), y_pred=pred_labels))
###Output
Accuracy Score on train data: 0.8252833333333334
Accuracy Score on test data: 0.8365
###Markdown
Neural network
###Code
from sklearn.neural_network import MLPClassifier
clf = MLPClassifier()
clf.fit(train_images, train_labels)
pred_labels = clf.predict(test_images)
pred_prob = clf.predict_proba(test_images)
print('Accuracy Score on train data: ', accuracy_score(y_true=np.array(train_labels), y_pred=clf.predict(train_images)))
print('Accuracy Score on test data: ', accuracy_score(y_true=np.array(test_labels), y_pred=pred_labels))
clf.n_iter_
clf.classes_
###Output
_____no_output_____
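###Markdown
Accuracy alone hides which digits get confused with one another; a confusion matrix (a small addition, not in the original notebook) makes the per-digit errors of the network above visible:
###Code
from sklearn.metrics import confusion_matrix

# rows = true digit, columns = predicted digit (uses pred_labels from the MLP cell above)
cm = confusion_matrix(np.array(test_labels), pred_labels)
print(cm)
###Output
_____no_output_____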
###Markdown
Image converting and testing
###Code
from PIL import Image, ImageFilter
from matplotlib import pyplot as plt
def imageprepare(argv):
im = Image.open(argv).convert('L')
width = float(im.size[0])
height = float(im.size[1])
newImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels
if width > height: # check which dimension is bigger
# Width is bigger. Width becomes 20 pixels.
nheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width
if (nheight == 0): # rare case but minimum is 1 pixel
nheight = 1
# resize and sharpen
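        # note: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the drop-in replacement there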
img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        wtop = int(round(((28 - nheight) / 2), 0))  # calculate vertical position
newImage.paste(img, (4, wtop)) # paste resized image on white canvas
else:
# Height is bigger. Heigth becomes 20 pixels.
nwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height
if (nwidth == 0): # rare case but minimum is 1 pixel
nwidth = 1
# resize and sharpen
img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        wleft = int(round(((28 - nwidth) / 2), 0))  # calculate horizontal position
newImage.paste(img, (wleft, 4)) # paste resized image on white canvas
    # newImage.save("sample.png")
tv = list(newImage.getdata()) # get pixel values
    # invert pixel values so 0 is pure white and 255 is pure black, matching MNIST's convention
tva = [(255 - x) for x in tv]
return tva
x=[imageprepare('Resources/Data/test.png')]
newArr=[[0 for d in range(28)] for y in range(28)]
k = 0
for i in range(28):
for j in range(28):
newArr[i][j]=x[0][k]
k+=1
plt.imshow(newArr, interpolation='nearest')
plt.show()
table = [(i,temp*100)for i,temp in enumerate(clf.predict_proba(x)[0])]
df = pd.DataFrame(table, columns=['number','chance'])
import plotly.express as px
fig = px.bar(df, x='number', y='chance')
fig.update_xaxes(range=[-0.5, 9])
fig
###Output
_____no_output_____ |
nlu/colab/healthcare/entity_resolution/entity_resolvers_overview.ipynb | ###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/healthcare/entity_resolution/entity_resolvers_overview.ipynb)

Entity Resolution

**Named entities** are sub-strings in a text that can be classified into categories. For example, in the string `"Tesla is a great stock to invest in "`, the sub-string `"Tesla"` is a named entity; it can be classified with the label `company` by an ML algorithm. **Named entities** can easily be extracted by the various pre-trained Deep Learning based NER algorithms provided by NLU.

After extracting **named entities**, an **entity resolution algorithm** can be applied to the extracted named entities. The resolution algorithm classifies each extracted entity into a class, which reduces the dimensionality of the data and has many useful applications. For example:

- "**Tesla** is a great stock to invest in "
- "**TSLA** is a great stock to invest in "
- "**Tesla, Inc** is a great company to invest in"

The sub-strings `Tesla`, `TSLA` and `Tesla, Inc` are all named entities that are classified with the label `company` by the NER algorithm. It tells us all these 3 sub-strings are of type `company`, but we cannot yet infer that these 3 strings are actually referring to literally the same company. This exact problem is solved by the resolver algorithms: they resolve all 3 entities to a common name, such as a company ID. This maps every reference to Tesla, regardless of how the string is represented, to the same ID.

This example can be expanded analogously to healthcare and many other text problems. In medical documents, the same disease can be referenced in many different ways. With NLU Healthcare you can leverage state-of-the-art pre-trained NER models to extract **Medical Named Entities** (Diseases, Treatments, Posology, etc.) and **resolve these** to common **healthcare disease codes**.

These algorithms are provided by **Spark NLP for Healthcare's** [SentenceEntityResolver](https://nlp.johnsnowlabs.com/docs/en/licensed_annotatorssentenceentityresolver) and [ChunkEntityResolvers](https://nlp.johnsnowlabs.com/docs/en/licensed_annotatorschunkentityresolver)

Available models

All the models available are:

| Language | nlu.load() reference | Spark NLP Model reference |
| -------- | -------------------- | ------------------------- |
| English | embed_sentence.biobert.mli | sbiobert_base_cased_mli |
| English | resolve | sbiobertresolve_cpt |
| English | resolve.cpt | sbiobertresolve_cpt |
| English | resolve.cpt.augmented | sbiobertresolve_cpt_augmented |
| English | resolve.cpt.procedures_augmented | sbiobertresolve_cpt_procedures_augmented |
| English | resolve.hcc.augmented | sbiobertresolve_hcc_augmented |
| English | [resolve.icd10cm](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_icd10cm_en.html) | [sbiobertresolve_icd10cm](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_icd10cm_en.html) |
| English | [resolve.icd10cm.augmented](https://nlp.johnsnowlabs.com/2020/12/13/sbiobertresolve_icd10cm_augmented_en.html) | [sbiobertresolve_icd10cm_augmented](https://nlp.johnsnowlabs.com/2020/12/13/sbiobertresolve_icd10cm_augmented_en.html) |
| English | [resolve.icd10cm.augmented_billable](https://nlp.johnsnowlabs.com/2021/02/06/sbiobertresolve_icd10cm_augmented_billable_hcc_en.html) | [sbiobertresolve_icd10cm_augmented_billable_hcc](https://nlp.johnsnowlabs.com/2021/02/06/sbiobertresolve_icd10cm_augmented_billable_hcc_en.html) |
| English | [resolve.icd10pcs](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_icd10pcs_en.html) | [sbiobertresolve_icd10pcs](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_icd10pcs_en.html) |
| English | [resolve.icdo](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_icdo_en.html) | [sbiobertresolve_icdo](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_icdo_en.html) |
| English | [resolve.rxcui](https://nlp.johnsnowlabs.com/2020/12/11/sbiobertresolve_rxcui_en.html) | [sbiobertresolve_rxcui](https://nlp.johnsnowlabs.com/2020/12/11/sbiobertresolve_rxcui_en.html) |
| English | [resolve.rxnorm](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_rxnorm_en.html) | [sbiobertresolve_rxnorm](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_rxnorm_en.html) |
| English | [resolve.snomed](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_snomed_auxConcepts_en.html) | [sbiobertresolve_snomed_auxConcepts](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_snomed_auxConcepts_en.html) |
| English | [resolve.snomed.aux_concepts](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_snomed_auxConcepts_en.html) | [sbiobertresolve_snomed_auxConcepts](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_snomed_auxConcepts_en.html) |
| English | [resolve.snomed.aux_concepts_int](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_snomed_auxConcepts_int_en.html) | [sbiobertresolve_snomed_auxConcepts_int](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_snomed_auxConcepts_int_en.html) |
| English | [resolve.snomed.findings](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_snomed_findings_en.html) | [sbiobertresolve_snomed_findings](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_snomed_findings_en.html) |
| English | [resolve.snomed.findings_int](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_snomed_findings_int_en.html) | [sbiobertresolve_snomed_findings_int](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_snomed_findings_int_en.html) |
###Code
# Install NLU
# Upload and add your spark_nlp_for_healthcare.json
!wget http://setup.johnsnowlabs.com/nlu/colab.sh -O - | bash
import nlu
###Output
--2022-04-15 03:50:47-- https://setup.johnsnowlabs.com/nlu/colab.sh
Resolving setup.johnsnowlabs.com (setup.johnsnowlabs.com)... 51.158.130.125
Connecting to setup.johnsnowlabs.com (setup.johnsnowlabs.com)|51.158.130.125|:443... connected.
HTTP request sent, awaiting response... 302 Moved Temporarily
Location: https://raw.githubusercontent.com/JohnSnowLabs/nlu/master/scripts/colab_setup.sh [following]
--2022-04-15 03:50:47-- https://raw.githubusercontent.com/JohnSnowLabs/nlu/master/scripts/colab_setup.sh
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1665 (1.6K) [text/plain]
Saving to: ‘STDOUT’
- 100%[===================>] 1.63K --.-KB/s in 0s
2022-04-15 03:50:47 (34.3 MB/s) - written to stdout [1665/1665]
Installing NLU 3.4.3rc2 with PySpark 3.0.3 and Spark NLP 3.4.2 for Google Colab ...
Hit:1 http://archive.ubuntu.com/ubuntu bionic InRelease
Get:2 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB]
Get:3 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease [3,626 B]
Ign:4 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 InRelease
Get:5 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]
Get:6 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic InRelease [15.9 kB]
Ign:7 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 InRelease
Get:8 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release [696 B]
Get:9 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB]
Hit:10 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release
Get:11 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release.gpg [836 B]
Hit:12 http://ppa.launchpad.net/cran/libgit2/ubuntu bionic InRelease
Get:13 http://ppa.launchpad.net/deadsnakes/ppa/ubuntu bionic InRelease [15.9 kB]
Get:14 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [2,268 kB]
Hit:15 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease
Get:16 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [3,134 kB]
Get:18 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Packages [953 kB]
Get:19 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic/main Sources [1,947 kB]
Get:20 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [2,695 kB]
Get:21 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [1,490 kB]
Get:22 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic/main amd64 Packages [996 kB]
Get:23 http://ppa.launchpad.net/deadsnakes/ppa/ubuntu bionic/main amd64 Packages [45.3 kB]
Fetched 13.8 MB in 4s (3,418 kB/s)
Reading package lists... Done
tar: spark-3.0.2-bin-hadoop2.7.tgz: Cannot open: No such file or directory
tar: Error is not recoverable: exiting now
     |████████████████████████████████| 209.1 MB 54 kB/s
     |████████████████████████████████| 142 kB 41.6 MB/s
     |████████████████████████████████| 505 kB 51.7 MB/s
     |████████████████████████████████| 198 kB 58.1 MB/s
  Building wheel for pyspark (setup.py) ... done
Collecting nlu_tmp==3.4.3rc10
Downloading nlu_tmp-3.4.3rc10-py3-none-any.whl (510 kB)
     |████████████████████████████████| 510 kB 5.1 MB/s
Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from nlu_tmp==3.4.3rc10) (1.21.5)
Requirement already satisfied: pyarrow>=0.16.0 in /usr/local/lib/python3.7/dist-packages (from nlu_tmp==3.4.3rc10) (6.0.1)
Requirement already satisfied: spark-nlp<3.5.0,>=3.4.2 in /usr/local/lib/python3.7/dist-packages (from nlu_tmp==3.4.3rc10) (3.4.2)
Requirement already satisfied: pandas>=1.3.5 in /usr/local/lib/python3.7/dist-packages (from nlu_tmp==3.4.3rc10) (1.3.5)
Requirement already satisfied: dataclasses in /usr/local/lib/python3.7/dist-packages (from nlu_tmp==3.4.3rc10) (0.6)
Requirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=1.3.5->nlu_tmp==3.4.3rc10) (2018.9)
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=1.3.5->nlu_tmp==3.4.3rc10) (2.8.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas>=1.3.5->nlu_tmp==3.4.3rc10) (1.15.0)
Installing collected packages: nlu-tmp
Successfully installed nlu-tmp-3.4.3rc10
Spark NLP for Healthcare could not be imported. Installing latest spark-nlp-jsl PyPI package via pip...
###Markdown
[Athena Conditions Entity Resolver (Healthcare)](https://nlp.johnsnowlabs.com/2020/09/16/chunkresolve_athena_conditions_healthcare_en.html)
###Code
data ="""The patient is a 5-month-old infant who presented initially on Monday with a cold, cough, and runny nose for 2 days. Mom states she had no fever. Her appetite was good but she was spitting up a lot. She had no difficulty breathing and her cough was described as dry and hacky. At that time, physical exam showed a right TM, which was red. Left TM was okay. She was fairly congested but looked happy and playful. She was started on Amoxil and Aldex and we told to recheck in 2 weeks to recheck her ear. Mom returned to clinic again today because she got much worse overnight. She was having difficulty breathing. She was much more congested and her appetite had decreased significantly today. She also spiked a temperature yesterday of 102.6 and always having trouble sleeping secondary to congestion."""
nlu.load('med_ner.jsl.wip.clinical en.resolve_chunk.cpt_clinical').predict(data, output_level='chunk')
###Output
ner_wikiner_glove_840B_300 download started this may take some time.
Approximate size to download 14.8 MB
[OK!]
###Markdown
[Sentence Entity Resolver for ICD10-CM (sbiobert_base_cased_mli embeddings)](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_icd10cm_en.html)
###Code
nlu.load("med_ner.jsl.wip.clinical en.resolve.icd10cm").predict("""This is an 82 - year-old male with a history of prior tobacco use , hypertension , chronic renal insufficiency , COPD ,
gastritis , and TIA who initially presented to Braintree with a non-ST elevation MI and Guaiac positive stools , transferred to St . Margaret\'s Center for Women & Infants for cardiac
catheterization with PTCA to mid LAD lesion complicated by hypotension and bradycardia requiring Atropine , IV fluids and transient dopamine possibly secondary to vagal reaction ,
subsequently transferred to CCU for close monitoring , hemodynamically stable at the time of admission to the CCU .""",output_level = "sentence")
###Output
_____no_output_____
###Markdown
[Sentence Entity Resolver for ICD10-PCS (sbiobert_base_cased_mli embeddings)](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_icd10pcs_en.html)
###Code
nlu.load("med_ner.jsl.wip.clinical en.resolve.icd10pcs").predict("""This is an 82 - year-old male with a history of prior tobacco use , hypertension , chronic renal insufficiency , COPD ,
gastritis , and TIA who initially presented to Braintree with a non-ST elevation MI and Guaiac positive stools , transferred to St . Margaret\'s Center for Women & Infants for cardiac
catheterization with PTCA to mid LAD lesion complicated by hypotension and bradycardia requiring Atropine , IV fluids and transient dopamine possibly secondary to vagal reaction ,
subsequently transferred to CCU for close monitoring , hemodynamically stable at the time of admission to the CCU .""",output_level = "sentence")
###Output
_____no_output_____
###Markdown
[Sentence Entity Resolver for RxCUI (sbiobert_base_cased_mli embeddings)](https://nlp.johnsnowlabs.com/2020/12/11/sbiobertresolve_rxcui_en.html)
###Code
nlu.load("med_ner.jsl.wip.clinical en.resolve.rxcui").predict("He was seen by the endocrinology service and she was discharged on 50 mg of eltrombopag oral at night, 5 mg amlodipine with meals, and metformin 1000 mg two times a day",output_level = "sentence")
###Output
_____no_output_____
###Markdown
[Sentence Entity Resolver for RxNorm (sbiobert_base_cased_mli embeddings)](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_rxnorm_en.html)
###Code
import nlu
nlu.load("med_ner.jsl.wip.clinical en.resolve.rxnorm").predict("""This is an 82 - year-old male with a history of prior tobacco use , hypertension , chronic renal insufficiency , COPD ,
gastritis , and TIA who initially presented to Braintree with a non-ST elevation MI and Guaiac positive stools , transferred to St . Margaret\'s Center for Women & Infants for cardiac
catheterization with PTCA to mid LAD lesion complicated by hypotension and bradycardia requiring Atropine , IV fluids and transient dopamine possibly secondary to vagal reaction ,
subsequently transferred to CCU for close monitoring , hemodynamically stable at the time of admission to the CCU .""",output_level = "sentence")
###Output
_____no_output_____
###Markdown
[Sentence Entity Resolver for Snomed Concepts, INT version (sbiobert_base_cased_mli embeddings)](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_snomed_findings_int_en.html)
###Code
nlu.load("med_ner.jsl.wip.clinical en.resolve.snomed.findings_int").predict("""This is an 82 - year-old male with a history of prior tobacco use , hypertension , chronic renal insufficiency , COPD ,
gastritis , and TIA who initially presented to Braintree with a non-ST elevation MI and Guaiac positive stools , transferred to St . Margaret\'s Center for Women & Infants for cardiac
catheterization with PTCA to mid LAD lesion complicated by hypotension and bradycardia requiring Atropine , IV fluids and transient dopamine possibly secondary to vagal reaction ,
subsequently transferred to CCU for close monitoring , hemodynamically stable at the time of admission to the CCU .""",output_level = "sentence")
###Output
_____no_output_____
[OK!]
embeddings_clinical download started this may take some time.
Approximate size to download 1.6 GB
[OK!]
sentence_detector_dl download started this may take some time.
Approximate size to download 354.6 KB
[OK!]
###Markdown
[Sentence Entity Resolver for RxNorm (sbiobert_base_cased_mli embeddings)](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_rxnorm_en.html)
###Code
import nlu
nlu.load("med_ner.jsl.wip.clinical en.resolve.rxnorm").predict("""This is an 82 - year-old male with a history of prior tobacco use , hypertension , chronic renal insufficiency , COPD ,
gastritis , and TIA who initially presented to Braintree with a non-ST elevation MI and Guaiac positive stools , transferred to St . Margaret\'s Center for Women & Infants for cardiac
catheterization with PTCA to mid LAD lesion complicated by hypotension and bradycardia requiring Atropine , IV fluids and transient dopamine possibly secondary to vagal reaction ,
subsequently transferred to CCU for close monitoring , hemodynamically stable at the time of admission to the CCU .""",output_level = "sentence")
###Output
jsl_ner_wip_clinical download started this may take some time.
Approximate size to download 14.5 MB
[OK!]
sbiobertresolve_rxnorm download started this may take some time.
Approximate size to download 810.7 MB
[OK!]
embeddings_clinical download started this may take some time.
Approximate size to download 1.6 GB
[OK!]
sbiobert_base_cased_mli download started this may take some time.
Approximate size to download 384.3 MB
[OK!]
sentence_detector_dl download started this may take some time.
Approximate size to download 354.6 KB
[OK!]
###Markdown
[Sentence Entity Resolver for Snomed Concepts, INT version (sbiobert_base_cased_mli embeddings)](https://nlp.johnsnowlabs.com/2020/11/27/sbiobertresolve_snomed_findings_int_en.html)
###Code
nlu.load("med_ner.jsl.wip.clinical en.resolve.snomed.findings_int").predict("""This is an 82 - year-old male with a history of prior tobacco use , hypertension , chronic renal insufficiency , COPD ,
gastritis , and TIA who initially presented to Braintree with a non-ST elevation MI and Guaiac positive stools , transferred to St . Margaret\'s Center for Women & Infants for cardiac
catheterization with PTCA to mid LAD lesion complicated by hypotension and bradycardia requiring Atropine , IV fluids and transient dopamine possibly secondary to vagal reaction ,
subsequently transferred to CCU for close monitoring , hemodynamically stable at the time of admission to the CCU .""",output_level = "sentence")
###Output
_____no_output_____
courses/machine_learning/deepdive2/production_ml/labs/distributed_training.ipynb
Distributed Training with GPUs on Cloud AI Platform**Learning Objectives:** 1. Setting up the environment 1. Create a model to train locally 1. Train on multiple GPUs/CPUs with MultiWorkerMirrored StrategyIn this notebook, we will walk through using Cloud AI Platform to perform distributed training using the `MultiWorkerMirroredStrategy` found within `tf.distribute`. This strategy will allow us to use the synchronous AllReduce strategy on a VM with multiple GPUs attached.Each learning objective will correspond to a __TODO__ in this student lab notebook -- try to complete this notebook first and then review the [Solution Notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/production_ml/solutions/distributed_training.ipynb) for reference.
###Code
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
###Output
_____no_output_____
###Markdown
Next we will configure our environment. Be sure to change the `PROJECT_ID` variable in the below cell to your Project ID. This will be the project to which the Cloud AI Platform resources will be billed. We will also create a bucket for our training artifacts (if it does not already exist). Lab Task 1: Setting up the environment
###Code
import os
# TODO 1
PROJECT_ID = "cloud-training-demos" # Replace with your PROJECT
BUCKET = PROJECT_ID
REGION = 'us-central1'
os.environ["PROJECT_ID"] = PROJECT_ID
os.environ["BUCKET"] = BUCKET
###Output
_____no_output_____
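###Markdown
The cell above only sets the variables; below is a minimal sketch of creating the staging bucket when it does not already exist (an assumption-laden convenience, not one of the lab TODOs -- it assumes the `gsutil` CLI is available, and hardcodes the region because `REGION` is not exported to the shell):
###Code
%%bash
# Sketch: create the staging bucket only if it is missing (not part of the lab TODOs)
gsutil ls -b gs://${BUCKET} >/dev/null 2>&1 || gsutil mb -l us-central1 gs://${BUCKET}
###Output
_____no_output_____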
###Markdown
Since we are going to submit our training job to Cloud AI Platform, we need to create our trainer package. We will create the `train` directory for our package and create a blank `__init__.py` file so Python knows that this folder contains a package.
###Code
!mkdir train
!touch train/__init__.py
###Output
_____no_output_____
###Markdown
Next we will create a module containing a function which will create our model. Note that we will be using the Fashion MNIST dataset. Since it's a small dataset, we will simply load it into memory for getting the parameters for our model.Our model will be a DNN with only dense layers, applying dropout to each hidden layer. We will also use ReLU activation for all hidden layers.
###Code
%%writefile train/model_definition.py
import tensorflow as tf
import numpy as np
# Get data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# add empty color dimension
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
def create_model():
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=x_train.shape[1:]))
model.add(tf.keras.layers.Dense(1028))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(512))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(256))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(10))
model.add(tf.keras.layers.Activation('softmax'))
return model
###Output
Writing train/model_definition.py
###Markdown
Before we submit our training jobs to Cloud AI Platform, let's be sure our model runs locally. We will call the `model_definition` function to create our model and use `tf.keras.datasets.fashion_mnist.load_data()` to import the Fashion MNIST dataset. Lab Task 2: Create a model to train locally
###Code
import os
import time
import tensorflow as tf
import numpy as np
from train import model_definition
#Get data
# TODO 2
# TODO -- Your code here.
print("Training time without GPUs locally: {}".format(time.time() - start))
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
32768/29515 [=================================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
26427392/26421880 [==============================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
8192/5148 [===============================================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
4423680/4422102 [==============================] - 0s 0us/step
240/240 [==============================] - 174s 725ms/step - loss: 4.1184 - sparse_categorical_accuracy: 0.6367 - val_loss: 0.6234 - val_sparse_categorical_accuracy: 0.7880
Training time without GPUs locally: 175.62197422981262
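###Markdown
For reference, here is a minimal sketch of what the TODO 2 cell might look like -- one possible local-training solution (load Fashion MNIST, build the datasets, compile the model from `model_definition`, and time the run); the official solution notebook may differ:
###Code
# Hypothetical TODO 2 solution: train locally and time it
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# add empty color dimension
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
# 20 repeats in batches of 5000 gives the 240 steps seen in the output above
ds_train = tf.data.Dataset.from_tensor_slices(
    (x_train, y_train)).repeat(20).batch(5000, drop_remainder=True)
ds_test = tf.data.Dataset.from_tensor_slices(
    (x_test, y_test)).batch(1000, drop_remainder=True)
model = model_definition.create_model()
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
    loss='sparse_categorical_crossentropy',
    metrics=['sparse_categorical_accuracy'])
start = time.time()
model.fit(ds_train, validation_data=ds_test)
print("Training time without GPUs locally: {}".format(time.time() - start))
###Output
_____no_output_____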
###Markdown
Train on multiple GPUs/CPUs with MultiWorkerMirrored Strategy That took a few minutes to train our model for 20 epochs. Let's see how we can do better using Cloud AI Platform. We will be leveraging the `MultiWorkerMirroredStrategy` supplied in `tf.distribute`. The main difference between this code and the code from the local test is that we need to compile the model within the scope of the strategy. When we do this our training op will use information stored in the `TF_CONFIG` variable to assign ops to the various devices for the AllReduce strategy. After the training process finishes, we will print out the time spent training. Since it takes a few minutes to spin up the resources being used for training on Cloud AI Platform, and this time can vary, we want a consistent measure of how long training took.Note: When we train models on Cloud AI Platform, the `TF_CONFIG` variable is automatically set. So we do not need to worry about adjusting based on what cluster configuration we use.
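For illustration, the JSON stored in `TF_CONFIG` on one worker looks roughly like the dictionary below (the addresses are made up; on Cloud AI Platform the variable is populated for you and should never be set by hand):
###Code
# Illustrative only -- AI Platform sets TF_CONFIG automatically on each VM
example_tf_config = {
    "cluster": {
        "worker": ["10.0.0.2:2222", "10.0.0.3:2222"]  # hypothetical VM addresses
    },
    "task": {"type": "worker", "index": 0}  # this VM's role and position in the cluster
}
###Output
_____no_output_____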
###Code
%%writefile train/train_mult_worker_mirrored.py
import os
import time
import tensorflow as tf
import numpy as np
from . import model_definition
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
#Get data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# add empty color dimension
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
def create_dataset(X, Y, epochs, batch_size):
dataset = tf.data.Dataset.from_tensor_slices((X, Y))
dataset = dataset.repeat(epochs).batch(batch_size, drop_remainder=True)
return dataset
ds_train = create_dataset(x_train, y_train, 20, 5000)
ds_test = create_dataset(x_test, y_test, 1, 1000)
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
model = model_definition.create_model()
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3, ),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
start = time.time()
model.fit(
ds_train,
validation_data=ds_test,
verbose=2
)
print("Training time with multiple GPUs: {}".format(time.time() - start))
###Output
Writing train/train_mult_worker_mirrored.py
###Markdown
Lab Task 3: Training with multiple GPUs/CPUs on created model using MultiWorkerMirrored Strategy First we will train a model without using GPUs to give us a baseline. We will use a consistent format throughout the trials. We will define a `config.yaml` file to contain our cluster configuration and then pass this file in as the value of a command-line argument `--config`.In our first example, we will use a single `n1-highcpu-16` VM.
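As a reference, here is a sketch of what the TODO 3a `config.yaml` might contain -- one possible answer assuming AI Platform's `trainingInput` schema, not necessarily the official solution:
###Code
%%writefile config.yaml
# Sketch for TODO 3a: a single n1-highcpu-16 master VM, no accelerators
trainingInput:
  scaleTier: CUSTOM
  masterType: n1-highcpu-16
###Output
_____no_output_____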
###Code
%%writefile config.yaml
# TODO 3a
# TODO -- Your code here.
%%bash
now=$(date +"%Y%m%d_%H%M%S")
JOB_NAME="cpu_only_fashion_minst_$now"
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--package-path=train \
--module-name=train.train_mult_worker_mirrored \
--runtime-version=2.3 \
--python-version=3.7 \
--region=us-west1 \
--config config.yaml
###Output
jobId: cpu_only_fashion_minst_20200903_154222
state: QUEUED
###Markdown
If we go through the logs, we see that the training job will take around 5-7 minutes to complete. Let's now attach two Nvidia Tesla K80 GPUs and rerun the training job.
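A sketch of what the TODO 3b `config.yaml` might contain (the same master VM, with two K80s attached via `acceleratorConfig`; a hedged sketch, not necessarily the official solution):
###Code
%%writefile config.yaml
# Sketch for TODO 3b: attach two NVIDIA Tesla K80 GPUs to the master
trainingInput:
  scaleTier: CUSTOM
  masterType: n1-highcpu-16
  masterConfig:
    acceleratorConfig:
      count: 2
      type: NVIDIA_TESLA_K80
###Output
_____no_output_____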
###Code
%%writefile config.yaml
# TODO 3b
# TODO -- Your code here.
%%bash
now=$(date +"%Y%m%d_%H%M%S")
JOB_NAME="multi_gpu_fashion_minst_2gpu_$now"
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--package-path=train \
--module-name=train.train_mult_worker_mirrored \
--runtime-version=2.3 \
--python-version=3.7 \
--region=us-west1 \
--config config.yaml
###Output
jobId: multi_gpu_fashion_minst_2gpu_20200903_154225
state: QUEUED
###Markdown
That was a lot faster! The training job will take up to 5-10 minutes to complete. Let's keep going and add more GPUs!
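A sketch of what the TODO 3c `config.yaml` might contain (as before, but with four K80s; a hedged sketch, not necessarily the official solution):
###Code
%%writefile config.yaml
# Sketch for TODO 3c: attach four NVIDIA Tesla K80 GPUs to the master
trainingInput:
  scaleTier: CUSTOM
  masterType: n1-highcpu-16
  masterConfig:
    acceleratorConfig:
      count: 4
      type: NVIDIA_TESLA_K80
###Output
_____no_output_____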
###Code
%%writefile config.yaml
# TODO 3c
# TODO -- Your code here.
%%bash
now=$(date +"%Y%m%d_%H%M%S")
JOB_NAME="multi_gpu_fashion_minst_4gpu_$now"
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--package-path=train \
--module-name=train.train_mult_worker_mirrored \
--runtime-version=2.3 \
--python-version=3.7 \
--region=us-west1 \
--config config.yaml
###Output
jobId: multi_gpu_fashion_minst_4gpu_20200903_154228
state: QUEUED
###Markdown
The training job will take up to 10 minutes to complete. It was faster than no GPUs, but why was it slower than 2 GPUs? If you rerun this job with 8 GPUs you'll actually see it takes just as long as using no GPUs!The answer is in our input pipeline. In short, the I/O involved in using more GPUs started to outweigh the benefits of having more available devices. We can try to improve our input pipelines to overcome this (e.g. using caching, adjusting batch size, etc.).
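The sketch below shows one such set of tweaks; `create_dataset_optimized` is a hypothetical replacement for the lab's `create_dataset`, not part of the lab itself:
###Code
# Sketch: input-pipeline tweaks that can help keep multiple GPUs fed
def create_dataset_optimized(X, Y, epochs, batch_size):
    dataset = tf.data.Dataset.from_tensor_slices((X, Y))
    dataset = dataset.cache()  # Fashion MNIST is small enough to keep in memory
    dataset = dataset.repeat(epochs).batch(batch_size, drop_remainder=True)
    # Overlap input preparation with training on the accelerators
    return dataset.prefetch(tf.data.experimental.AUTOTUNE)
###Output
_____no_output_____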
###Code
###Output
_____no_output_____
lab3/transposition_block.ipynb
Transposition block. Hexadecimal data is passed through a final transposition block. The input is a string of hexadecimal digits, 16 characters long. We implement the required functions: * a function that converts a hex string to a binary string * the transposition function itself * a function that converts a binary string back to a hex string
###Code
def hexStrToBinStr(hexStr):
    # Convert each hex digit to its zero-padded 4-bit binary representation
    binStr = ''
    for c in hexStr:
        binStr += str(bin(int(c, 16)))[2:].zfill(4)
    return binStr
def transposition(binStr, transMatr):
    # Output bit i is taken from input position transMatr[i] (positions are 1-based)
    binOutput = ''
    for p in transMatr:
        binOutput += binStr[p - 1]
    return binOutput
def binStrToHexStr(binStr):
    # Convert each group of 4 bits back into an uppercase hex digit
    output = ''
    for i in range(0, len(binStr), 4):
        output += hex(int(binStr[i: i + 4], 2))[2:].upper()
    return output
###Output
_____no_output_____
###Markdown
Let's test it:
###Code
trans = [40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25]
input = 'AAAABBBBCCCCDDDD'
print(f'Input string (hex): {input}')
binInput = hexStrToBinStr(input)
print(f'Input string (binary): {binInput}')
binOutput = transposition(binInput, trans)
print(f'Output string (binary): {binOutput}')
output = binStrToHexStr(binOutput)
print(f'Output string (hex): {output}')
###Output
Input string (hex): AAAABBBBCCCCDDDD
Input string (binary): 1010101010101010101110111011101111001100110011001101110111011101
Output string (binary): 0000111101010101101010101111111100001111010101011010101011111111
Output string (hex): 0F55AAFF0F55AAFF
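###Markdown
A transposition block in a cipher must be invertible. As a quick sketch (not part of the original lab), we can build the inverse permutation and check that it restores the input:
###Code
def invertTransposition(transMatr):
    # inverse[i] is the 1-based output position where input bit i+1 ended up
    inverse = [0] * len(transMatr)
    for outPos, inPos in enumerate(transMatr):
        inverse[inPos - 1] = outPos + 1
    return inverse
invTrans = invertTransposition(trans)
restored = binStrToHexStr(transposition(hexStrToBinStr(output), invTrans))
print(restored == input)  # True: the inverse permutation undoes the block
###Output
_____no_output_____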
03-Numpy Exercise.ipynb
NumPy Exercises Now that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks, and then you'll be asked some more complicated questions. Import NumPy as np
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Create an array of 10 zeros
###Code
np.zeros(10)
###Output
_____no_output_____
###Markdown
Create an array of 10 ones
###Code
np.ones(10)
###Output
_____no_output_____
###Markdown
Create an array of 10 fives
###Code
np.ones(10)*5
###Output
_____no_output_____
###Markdown
Create an array of the integers from 10 to 50
###Code
np.arange(10,51)
###Output
_____no_output_____
###Markdown
Create an array of all the even integers from 10 to 50
###Code
np.arange(10,51,2)
###Output
_____no_output_____
###Markdown
Create a 3x3 matrix with values ranging from 0 to 8
###Code
np.arange(0,9).reshape(3,3)
###Output
_____no_output_____
###Markdown
Create a 3x3 identity matrix
###Code
np.eye(3,3)
np.eye(3)
###Output
_____no_output_____
###Markdown
Use NumPy to generate a random number between 0 and 1
###Code
np.random.rand(1)
###Output
_____no_output_____
###Markdown
Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution
###Code
np.random.randn(25)
###Output
_____no_output_____
###Markdown
Create the following matrix:
###Code
np.arange(1,101).reshape(10,10)/100
###Output
_____no_output_____
###Markdown
Create an array of 20 linearly spaced points between 0 and 1:
###Code
np.linspace(0,1,20)
###Output
_____no_output_____
###Markdown
Numpy Indexing and SelectionNow you will be given a few matrices, and be asked to replicate the resulting matrix outputs:
###Code
x1 = np.arange(1,26).reshape(5,5)
x1
x1[2:,1:]
x1[0:3,1].reshape(3,1)
x1[4:]
x1[3:]
###Output
_____no_output_____
###Markdown
Now do the following Get the sum of all the values in mat
###Code
np.sum(x1)
###Output
_____no_output_____
###Markdown
Get the standard deviation of the values in mat
###Code
np.std(x1)
###Output
_____no_output_____
###Markdown
Get the sum of all the columns in mat
###Code
x1.sum(0)
###Output
_____no_output_____
Custom_DeOldify_VideoColorizer_Colab.ipynb
DeOldify - Colorize your own videosA user-friendly, simplified version of the original DeOldify Colab Notebook to quickly convert a YouTube black-and-white video to color with audio, exporting the result to Google Drive.Author: Ojas Dileep Sawant **Credits****Robert Bell**, **Dana Kelley** and people contributing to https://github.com/jantic/DeOldify.gitFor more customizations and features, visit the original Colab Notebook: https://colab.research.google.com/github/jantic/DeOldify/blob/master/VideoColorizerColab.ipynb **Prerequisite:**Before proceeding further:1. Click on the **Runtime** menu on the top > **Change runtime type** > **GPU** > **Save**2. Then, click **Connect** on the top right. Before beginning, we will configure input/output settings and **mount** your **Google Drive Storage** to ensure the results are backed up before the session disconnects within the **12 hour limit** of a free Colab session.When asked, open the URL, **log in** to your Google Drive in the new tab, hit **Allow**, copy-paste the authorization code, and hit **Enter**.Update the URL link below and then run this section once every time after opening this notebook.
###Code
#@title Configure Settings
#@markdown ### Input Youtube URL
#@markdown Default url is Arrival of a Train at La Ciotat (The Lumière Brothers, 1895)
INPUT_URL = "https://youtu.be/1dgLEDdFddk" #@param{type:"string"}
#@markdown ### Output Directory
#@markdown Path (relative to the root of your Google Drive) e.g. "My Drive/DeOldify_Data"
OUTPUT_DIR = "DeOldify_Data" #@param{type:"string"}
from google.colab import drive
drive.mount('/gdrive')
###Output
_____no_output_____
###Markdown
Setup**Click** this text section to ensure it is highlighted/selected and from the **Runtime menu** on top click on **Run After**
###Code
!git clone https://github.com/jantic/DeOldify.git DeOldify
cd DeOldify
#NOTE: This must be the first call in order to work properly!
from deoldify import device
from deoldify.device_id import DeviceId
#choices: CPU, GPU0...GPU7
device.set(device=DeviceId.GPU0)
import torch
if not torch.cuda.is_available():
print('GPU not available.')
from os import path
!pip install -r colab_requirements.txt
import fastai
from deoldify.visualize import *
from pathlib import Path
torch.backends.cudnn.benchmark=True
import warnings
warnings.filterwarnings("ignore", category=UserWarning, message=".*?Your .*? set is empty.*?")
!mkdir 'models'
!wget https://www.dropbox.com/s/336vn9y4qwyg9yz/ColorizeVideo_gen.pth?dl=0 -O ./models/ColorizeVideo_gen.pth
!wget https://media.githubusercontent.com/media/jantic/DeOldify/master/resource_images/watermark.png -O ./resource_images/watermark.png
colorizer = get_video_colorizer()
###Output
_____no_output_____
###Markdown
Run
###Code
source_url = INPUT_URL
render_factor = 21
watermarked = True
if source_url is not None and source_url !='':
video_path = colorizer.colorize_from_url(source_url, 'video.mp4', render_factor, watermarked=watermarked)
show_video_in_notebook(video_path)
else:
print('Provide a video url and try again.')
###Output
_____no_output_____
###Markdown
Export the source files and result to google drive.
###Code
!mkdir -p "/gdrive/My Drive/$OUTPUT_DIR/result" "/gdrive/My Drive/$OUTPUT_DIR/source"
%cp /content/DeOldify/video/result/* /gdrive/My\ Drive/$OUTPUT_DIR/result
%cp /content/DeOldify/video/source/* /gdrive/My\ Drive/$OUTPUT_DIR/source
!echo "Conversion and Google Drive Export completed at My Drive/"$OUTPUT_DIR
###Output
_____no_output_____
pruning/MSE_skygrid_MSLR_1e6_trees.ipynb
Let's try to use the EventFilter regressor for ranking problems,
training 1,000,000 trees in an attempt to obtain better quality.
%pylab inline
import h5py
import pandas
from sklearn.metrics import mean_squared_error
def load_h5(name):
print "reading from",name
h5f = h5py.File(name,'r')
labels = h5f['labels'][:]
qids = h5f['qids'][:]
features = h5f['features'][:]
h5f.close()
print "done"
sorter = numpy.argsort(qids)
return features[sorter], qids[sorter], labels[sorter]
Xtr,Qtr,Ytr = load_h5("../data/MSLR/mslr_train")
Xts,Qts,Yts = load_h5("../data/MSLR/mslr_test")
print len(Xtr), len(Xts)
from rep_ef.estimators import MatrixNetSkyGridRegressor
ef = MatrixNetSkyGridRegressor(connection='skygrid', user_name='axelr', regularization=0.001,
features_sample_rate_per_iteration=0.2,
iterations=100000, training_fraction=0.2)
%%time
ef.fit(Xtr, Ytr)
import cPickle
with open('../data/MSLR10k_skygrid.mx', 'w') as f:
cPickle.dump(ef.formula_mx, f)
import cPickle
with open('../data/MSLR10k_skygrid.mx', 'r') as f:
formula_mx = cPickle.load(f)
from _matrixnetapplier import MatrixnetClassifier
from StringIO import StringIO
mn = MatrixnetClassifier(StringIO(formula_mx))
mean_squared_error(Yts, mn.apply(Xts))
from itertools import islice
def plot_mse_curves(clf, step=10):
mses_ts = []
for p in islice(clf.staged_predict(Xts), None, None, step):
mses_ts.append(mean_squared_error(Yts, p))
mses_tr = []
for p in islice(clf.staged_predict(Xtr), None, None, step):
mses_tr.append(mean_squared_error(Ytr, p))
plot(mses_ts)
plot(mses_tr)
return mses_tr, mses_ts
mses_ef = plot_mse_curves(ef, step=500)
ylim(0.5, 0.6), grid()
###Output
_____no_output_____
Policy Gradients.ipynb
Policy Gradients with PyTorchIn this notebook, I'm going to walk through an implementation of policy gradients in PyTorch. Policy gradient methods are a set of deep reinforcement learning algorithms that use neural networks to approximate the policy function: which actions to take given a state. I was inspired to write this by the impressive results of [OpenAI Five](https://blog.openai.com/openai-five/), a team of agents that learned to play Dota 2 better than amateurs using policy gradients. OpenAI Five uses a variation of policy gradients called Proximal Policy Optimization (PPO) to train the agents. Deep reinforcement learning agents are notoriously unstable and difficult to train, but PPO seems to be the most reliable method currently available. So, this notebook is working out a basic policy gradient implementation, then future notebooks will implement a full-blown PPO agent.I'll be using the CartPole environment from OpenAI Gym for this. The goal here is to move the cart left and right in an attempt to keep the pole upright.At this point, the CartPole environment seems to be the baseline for deep RL agents. If your agent can't solve CartPole, it can't solve anything.If you are new to reinforcement learning, here are a couple great resources from [Andrej Karpathy](http://karpathy.github.io/2016/05/31/rl/) and [Arthur Juliani](https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0).First off, import gym and PyTorch (as well as staples Numpy and Matplotlib).
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import gym
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn, optim
###Output
_____no_output_____
###Markdown
Creating an environment with Gym is simple, just `env = gym.make('env-name')`. I can get the CartPole environment with `'CartPole-v0'`. You can easily use other environments such as `'LunarLander-v2'` here and modify the code below appropriately.
###Code
env = gym.make('CartPole-v0')
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
###Markdown
In the CartPole environment, there are 4 values describing the state: the pole's angle, the pole's angular velocity, the cart's position, and the cart's velocity. This is called the *state space*, these four values are sufficient to describe the environment for our agent.The cart can make two actions, either move to the left or to the right. This is called the *action space*, the possible actions the agent can take in the environment. Here, the action space is *discrete*. That is, the actions can take on only specific values (move left or move right). The state space on the other hand is *continuous*, the values can be any numbers within some ranges. The actions here could be continuous. Instead of just "move left" or "move right" the actions could set the acceleration to the left or the acceleration to the right. In another notebook, I'll modify this implementation to work with continuous action spaces.
###Code
print(f"State space: {env.observation_space}")
print(f"Action space: {env.action_space}")
###Output
State space: Box(4,)
Action space: Discrete(2)
###Markdown
What we want to do is find a *policy* that looks at the current state and makes a prediction about which action to take. This can be thought of as a function that takes the state as an input and returns a probability distribution for the actions. Mathematically, this is represented by $p\left(y_i \mid x_i\right)$ where $x_i$ is the state and $y_i$ is the action for some trial $i$.In the example shown above, we have a state $x_i$ and our policy tells us the probabilities to take our two actions, 0.8 for action 1 and 0.2 for action 2. Based on those probabilities, the agent will choose an action stochastically. That is, the action will be chosen randomly according to the probabilities from our policy: there is an 80% chance the agent will perform action 1 and a 20% chance the agent will perform action 2.The goal here is to find a policy where our agent performs the task as well as possible. Neural networks with non-linear activation functions have a nice feature where they approximate arbitrary functions. This means we can train a neural network to give us our policy function $p\left(y_i \mid x_i\right)$. The goal is to give our network a state, $x_i$, and have it return a vector of probabilities for the action. The network will have a number of inputs equal to the dimension of the state space and a number of outputs equal to the dimensions of the action space. To add in non-linearity, I'll use a single hidden layer with a ReLU activation. The network might perform better with more layers and more units, feel free to modify this code and experiment with the network architecture.We can train this network by turning our reinforcement learning problem into a supervised learning problem, discussed in the next section.
###Code
# Creating a class for the agent
class Agent:
def __init__(self, n_states, n_hidden, n_actions, lr=0.003):
# Define the agent's network
self.net = nn.Sequential(nn.Linear(n_states, n_hidden),
nn.ReLU(),
nn.Linear(n_hidden, n_actions),
nn.Softmax(dim=0))
# How we're optimizing the network
self.opt = optim.Adam(self.net.parameters(), lr=lr)
def predict(self, observation):
""" Given an observation, a state, return a probability distribution
of actions
"""
state = torch.tensor(observation, dtype=torch.float32)
        actions = self.net(state)
return actions
def update(self, loss):
""" Update the agent's network given a loss """
self.opt.zero_grad()
loss.backward()
self.opt.step()
###Output
_____no_output_____
###Markdown
Since actions lead to later possible rewards, we want to propagate those rewards back to the earlier actions that helped earn them. The standard way to do this is with the discounted return: the return for timestep $t$ is $R_t = r_t + \gamma r_{t+1} + \gamma^2 r_{t+2} + \dots$, where the discount factor $\gamma$ (0.99 here) controls how far into the future credit reaches back. Good actions early on are weighted more than later actions since they contribute more to the overall sequence of actions. The function below computes these returns by scanning the rewards in reverse, then normalizes them to zero mean and unit variance, which helps reduce the variance of the gradient updates.
###Code
def discount_rewards(rewards, gamma=0.99):
discounted = []
R = 0
for r in rewards[::-1]:
R = r + gamma * R
discounted.insert(0, R)
# Now normalize
discounted = np.array(discounted)
normed = (discounted - discounted.mean())/discounted.std()
return normed
###Output
_____no_output_____
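###Markdown
A quick sanity check of the discounting on a hypothetical reward sequence: three rewards of 1 with gamma=0.99 give raw returns [2.9701, 1.99, 1.0], which are then normalized.
###Code
# Sketch: earlier steps accumulate more future credit, so the first return is largest
print(discount_rewards([1, 1, 1]))
###Output
_____no_output_____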
###Markdown
Now we can train the agent using the REINFORCE policy gradient procedure. For each episode we run the policy in the environment, sampling an action from the predicted distribution at every step and recording the probability of the chosen action along with the reward received. When the episode ends, the rewards are discounted and normalized, and every few episodes the network is updated by minimizing the loss $-\sum_t \log \pi\left(a_t \mid s_t\right) R_t$, which raises the probability of actions that preceded high returns. Training stops once the average reward over the last 100 episodes exceeds 195, the threshold at which CartPole is considered solved.
###Code
agent = Agent(4, 128, 2, lr=0.003)
total_episodes = 5000
max_steps = 999
solved_reward = 195
print_every = 10
update_every = 5
replay = {'actions':[], 'rewards':[]}
render = True
reward_log = []
ii = 0
while ii < total_episodes:
state = env.reset()
rewards, actions = [], []
for t in range(max_steps):
# Have our agent predict an action from the state
action_ps = agent.predict(state)
action = torch.multinomial(action_ps, 1).item()
actions.append(action_ps[action].unsqueeze(0))
# Using this action, get the next state and the reward
state, reward, done, _ = env.step(action)
rewards.append(reward)
if render and ii % print_every == 0:
env.render()
if done or t == (max_steps - 1):
# Record experiences
reward_log.append(sum(rewards))
if ii % print_every == 0:
print(sum(rewards))
losses = []
rewards = discount_rewards(rewards)
replay['actions'].extend(actions)
replay['rewards'].extend(rewards)
# Update our agent with the experiences
if ii % update_every == 0:
for a, r in zip(*replay.values()):
losses.append(-torch.log(a)*r)
loss = torch.cat(losses).sum()
agent.update(loss)
replay['actions'], replay['rewards'] = [], []
break
if sum(reward_log[-100:])/100 > solved_reward:
print(f"Environment solved in {ii-100} episodes with a reward of {np.mean(reward_log[-100:])}")
break
state = env.reset()
ii += 1
###Output
_____no_output_____
###Markdown
**Note:** This plot of the rewards is for the Lunar Lander environment
###Code
plt.plot(reward_log)
###Output
_____no_output_____
###Markdown
Here I'm just going to have our trained agent land a few times so I can make a video/gif.
###Code
trials = 10
max_steps = 999
for ii in range(trials):
state = env.reset()
# Have our agent predict an action from the state
for ii in range(max_steps):
action_ps = agent.predict(state)
action = torch.multinomial(action_ps, 1).item()
# Get the next state and the reward
state, reward, done, _ = env.step(action)
env.render()
if done:
break
###Output
_____no_output_____
FAI02_old/Lesson9/Lesson9_SR_CodeAlong.ipynb
07 SEP 2017 - WH NixaloThis is a code-along of the super-resolution portion of the FADL2 Lesson 9 JNB.
###Code
%matplotlib inline
import os; import sys; sys.path.insert(1, os.path.join('../utils'))
from utils2 import *
from scipy.optimize import fmin_l_bfgs_b
from scipy.misc import imsave
from keras import metrics
from vgg16_avg import VGG16_Avg
# Tell TensorFlow to use no more GPU RAM than necessary
limit_mem()
path = '../data/'
###Output
_____no_output_____
###Markdown
Use Content Loss to Create a Super-Resolution NetworkSo far we've demonstrated how to achieve successful results in style transfer. However, there's an obvious drawback to our implementation, namely that we're training an image, not a network, and therefore every new image requires us to retrain. It's not a feasible method for any sort of real-time application. Fortunately, we can address this issue by using a fully convolutional network (FCN), and in particular we'll look at this implementation for Super Resolution. We're following the approach in [this paper](https://arxiv.org/abs/1603.08155).
###Code
rn_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32)
preproc = lambda x: (x - rn_mean)[:, :, :, ::-1]
deproc = lambda x,s: np.clip(x.reshape(s)[:, :, :, ::-1] + rn_mean, 0, 255)
arr_lr = bcolz.open(path + 'trn_resized_72.bc')[:]
arr_hr = bcolz.open(path + 'trn_resized_288.bc')[:]
pars = {'verbose': 0, 'callbacks': [TQDMNotebookCallback(leave_inner=True)]}
shp = arr_hr.shape[1:]
arr_hr.shape[1:]
###Output
_____no_output_____
###Markdown
To start we'll define some of the building blocks of our network. In particular recall the residual block (as used in [ResNet](https://arxiv.org/abs/1512.03385)), which is just a sequence of 2 convolutional layers whose output is added to the initial block input. We also have a de-convolutional layer (a.k.a. a "Transposed Convolution" or "Fractionally-Strided Convolution"), whose purpose is to learn to 'undo' the convolutional function. It does this by padding the smaller image in such a way that applying filters to it produces a larger image.
###Code
def conv_block(x, filters, size, stride=(2,2), mode='same', act=True):
x = Convolution2D(filters, size, size, subsample=stride, border_mode=mode)(x)
x = BatchNormalization(mode=2)(x)
return Activation('relu')(x) if act else x
def res_block(ip, nf=64):
x = conv_block(ip, nf, 3, (1,1))
x = conv_block(x, nf, 3, (1,1), act=False)
return merge([x, ip], mode='sum')
# def deconv_block(x, filters, size, shape, stride=(2,2)):
# x = Deconvolution2D(filters, size, size, subsample=stride,
# border_mode='same', output_shape=(None,)+shape)(x)
# x = BatchNormalization(mode=2)(x)
# return Activation('relu')(x)
def up_block(x, filters, size):
x = keras.layers.UpSampling2D()(x)
x = Convolution2D(filters, size, size, border_mode='same')(x)
x = BatchNormalization(mode=2)(x)
return Activation('relu')(x)
###Output
_____no_output_____
###Markdown
This model uses the previously defined blocks to encode a low-res image and then upsample it to match the same image in hi-res.
###Code
inp = Input(arr_lr.shape[1:])
x = conv_block(inp, 64, 9, (1,1))
for i in range(4): x = res_block(x)
x = up_block(x, 64, 3)
x = up_block(x, 64, 3)
x = Convolution2D(3, 9, 9, activation='tanh', border_mode='same')(x)
outp = Lambda(lambda x: (x+1) * 127.5)(x)
###Output
_____no_output_____
###Markdown
The method of training this network is almost exactly the same as training the pixels from our previous implementations. The idea here is we're going to feed two images to Vgg16 and compare their convolutional outputs at some layer. These two images are the target image (which in our case is the same as the original but at a higher resolution), and the output of the previous network we just defined, which we hope will learn to output a high resolution image.The key then is to train this other network to produce an image that minimizes the loss between the outputs of some convolutional layer in Vgg16 (which the paper refers to as "perceptual loss"). In doing so, we're able to train a network that can upsample an image and recreate the higher resolution details.
###Code
vgg_inp = Input(shp)
vgg = VGG16(include_top=False, input_tensor=Lambda(preproc)(vgg_inp))
###Output
_____no_output_____
###Markdown
Since we only want to learn the "upsampling network", and are just using VGG to calculate the loss function, we set the Vgg layers to not be trainable:
###Code
for λ in vgg.layers: λ.trainable=False
###Output
_____no_output_____
###Markdown
An important difference in training for super resolution is the loss function. We use what's known as a perceptual loss function (which is simply the content loss for some layer).
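Concretely, the loss computed below is $\mathcal{L} = \sum_{i=1}^{3} w_i \sqrt{\tfrac{1}{N_i}\sum \left(\phi_i(\hat{y}) - \phi_i(y)\right)^2}$, where $\phi_i$ are the `block{i}_conv1` activations of VGG, $N_i$ is the number of elements in each activation, $\hat{y}$ is the network's upsampled output, $y$ is the true high-resolution image, and the layer weights are $w = [0.1, 0.8, 0.1]$.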
###Code
def get_outp(m, ln): return m.get_layer(f'block{ln}_conv1').output
vgg_content = Model(vgg_inp, [get_outp(vgg, o) for o in [1,2,3]])
vgg1 = vgg_content(vgg_inp)
vgg2 = vgg_content(outp)
# ln = 1
# print(f'block{ln}_conv1'.format(ln))
def mean_sqr_b(diff):
dims = list(range(1, K.ndim(diff)))
return K.expand_dims(K.sqrt(K.mean(diff**2, dims)), 0)
w = [0.1, 0.8, 0.1]
def content_fn(x):
res = 0; n=len(w)
for i in range(n): res += mean_sqr_b(x[i]-x[i+n]) * w[i]
return res
m_sr = Model([inp, vgg_inp], Lambda(content_fn)(vgg1+vgg2))
targ = np.zeros((arr_hr.shape[0], 1))
###Output
_____no_output_____
###Markdown
Finally we compile this chain of models and we can pass it the original lores image as well as the hires to train on. **We also define a zero vector as a target parameter, which is a necessary parameter when calling fit on a keras model.**
###Code
m_sr.compile('adam','mse')
m_sr.fit([arr_lr, arr_hr], targ, 8, 2, **pars)
m_sr.save_weights(path + 'lesson9/results/' + 'sr_final.h5')
###Output
_____no_output_____
###Markdown
We use learning rate annealing to get a better fit.
###Code
K.set_value(m_sr.optimizer.lr, 1e-4)
m_sr.fit([arr_lr, arr_hr], targ, 8, 1, **pars)
###Output
_____no_output_____
###Markdown
We're only interested in the trained part of the model, which does the actual upsampling.
###Code
top_model = Model(inp, outp)
p = top_model.predict(arr_lr[10:11])
###Output
_____no_output_____
###Markdown
After training for some time, we get some very impressive results! Looking at these two images, we can see that the predicted higher resolution image has filled in a lot of detail, including the shadows under the greens and the texture of the food.
###Code
plt.imshow(arr_lr[10].astype('uint8'));
plt.imshow(p[0].astype('uint8'));
top_model.save_weights(path + 'lesson9/results/' + 'sr_final.h5')
top_model.load_weights(path + 'lesson9/results/' + 'sr_final.h5')
###Output
_____no_output_____
###Markdown
The important thing to take away here is that as opposed to our earlier approaches, this type of approach results in a model that can create the desired image and is a scalable implementation. Note that we haven't used a test set here, so we don't know if the above result is due to over-fitting.
###Code
# well, since you mention it:
mofolo_jup = Image.open(path + 'sr-imgs/Jupiter-Juno-LR.jpeg')
mofolo_jup = np.expand_dims(np.array(mofolo_jup), 0)
p = top_model.predict(mofolo_jup)
# lores Jupiter
plt.imshow(mofolo_jup[0].astype('uint8'));
# superes Jupiter
plt.imshow(p[0].astype('uint8'))
# original hires jupiter:
mofohi_jup = Image.open(path + 'sr-imgs/Jupiter-Juno-HR.jpg')
plt.imshow(mofohi_jup)
###Output
_____no_output_____
###Markdown
Fast Style TransferThe original paper showing the above approach to super resolution also used this approach to create a much faster style transfer system (for a specific style). Take a look at [the paper](https://arxiv.org/abs/1603.08155) and the very helpful [supplementary material](http://cs.stanford.edu/people/jcjohns/papers/eccv16/JohnsonECCV16Supplementary.pdf). Reflection PaddingThe supplementary material mentions that they found reflection padding helpful - we have implemented this as a Keras layer. All the other layers and blocks are already defined above.
###Code
class ReflectionPadding2D(Layer):
def __init__(self, padding=(1,1), **kwargs):
self.padding = tuple(padding)
self.input_spec = [InputSpec(ndim=4)]
super(ReflectionPadding2D, self).__init__(**kwargs)
def get_output_shape_for(self, s):
return (s[0], s[1] + 2 * self.padding[0], s[2] + 2 * self.padding[1], s[3])
def call(self, x, mask=None):
w_pad, h_pad = self.padding
return tf.pad(x, [[0,0], [h_pad, h_pad], [w_pad, w_pad], [0,0] ], 'REFLECT')
###Output
_____no_output_____
###Markdown
Testing the reflection padding layer:
###Code
inp = Input((288, 288, 3))
ref_model = Model(inp, ReflectionPadding2D((40, 10))(inp))
ref_model.compile('adam', 'mse')
p = ref_model.predict(arr_hr[10:11])
plt.imshow(p[0].astype('uint8'))
###Output
_____no_output_____
###Markdown
Main AlgorithmThis approach is exactly the same as super resolution, except now the loss includes the style loss.
###Code
shp = arr_hr.shape[1:]
style = Image.open(path + 'nst/starry-night.png')
# style = style.resize(np.divide(style.size, 3.5).astype('int32'))
style = np.array(style)[:shp[0], :shp[1], :shp[2]]
plt.imshow(style);
def res_crop_block(ip, nf=64):
x = conv_block(ip, nf, 3, (1,1,), 'valid')
x = conv_block(x, nf, 3, (1,1), 'valid', False)
ip = Lambda(lambda x: x[:, 2:-2, 2:-2])(ip)
return merge([x, ip], mode='sum')
inp=Input(shp)
x=ReflectionPadding2D((40,40))(inp)
x=conv_block(x, 64, 9, (1,1))
x=conv_block(x, 64, 3)
x=conv_block(x, 64, 3)
for i in range(5): x=res_crop_block(x)
x=up_block(x, 64, 3)
x=up_block(x, 64, 3)
x=Convolution2D(3, 9, 9, activation='tanh', border_mode='same')(x)
outp=Lambda(lambda x: (x+1)*127.5)(x)
vgg_inp=Input(shp)
vgg=VGG16(include_top=False, input_tensor=Lambda(preproc)(vgg_inp))
for λ in vgg.layers: λ.trainable=False
def get_outp(m, ln): return m.get_layer(f'block{ln}_conv2').output
vgg_content = Model(vgg_inp, [get_outp(vgg, o) for o in [2,3,4,5]])
###Output
_____no_output_____
###Markdown
Here we alter the super resolution approach by adding style outputs:
###Code
style_targs = [K.variable(o) for o in
vgg_content.predict(np.expand_dims(style, 0))]
[K.eval(K.shape(o)) for o in style_targs]
vgg1 = vgg_content(vgg_inp)
vgg2 = vgg_content(outp)
###Output
_____no_output_____
###Markdown
Our loss now includes the MSE for the content loss and the Gram Matrix for the style
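The Gram matrix of a layer's activations measures which feature channels fire together, which is what captures 'style'; matching `gram_matrix_b` below, $$ G_j(x)_{c c'} = \frac{1}{C_j H_j W_j} \sum_{h,w} \phi_j(x)_{c,h,w}\, \phi_j(x)_{c',h,w} $$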
###Code
def gram_matrix_b(x):
x = K.permute_dimensions(x, (0, 3, 1, 2))
s = K.shape(x)
feat = K.reshape(x, (s[0], s[1], s[2]*s[3]))
return K.batch_dot(feat, K.permute_dimensions(feat, (0, 2, 1))
) / K.prod(K.cast(s[1:], K.floatx()))
w = [0.1, 0.2, 0.6, 0.1]
def tot_loss(x):
loss = 0; n = len(style_targs)
for i in range(n):
loss += mean_sqr_b(gram_matrix_b(x[i+n]) - gram_matrix_b(style_targs[i])) / 2.
loss += mean_sqr_b(x[i]-x[i+n]) * w[i]
return loss
loss = Lambda(tot_loss)(vgg1 + vgg2)
m_style = Model([inp, vgg_inp], loss)
targ = np.zeros((arr_hr.shape[0], 1))
m_style.compile('adam', 'mse')
m_style.fit([arr_hr, arr_hr], targ, 4, 2, **pars)
m_style.save_weights(path + 'lesson9/results/' + 'style_final.h5')
K.set_value(m_style.optimizer.lr, 1e-4)
m_style.fit([arr_hr, arr_hr], targ, 4, 1, **pars)
top_model = Model(inp, outp)
###Output
_____no_output_____
###Markdown
Now we can pass any image through this CNN and it'll produce the desired style.
###Code
p = top_model.predict(arr_hr[:10])
plt.imshow(np.round(p[0]).astype('uint8'))
###Output
_____no_output_____
###Markdown
*this is kind of hilarious*
###Code
top_model.save_weights(path + 'lesson9/results/style_final.h5')
# top_model.load_weights(path + 'lesson9/results/style_final.h5')
###Output
_____no_output_____ |
chapter03/01-prep_data.ipynb | ###Markdown
This notebook is developed using the `Python 3 (Data Science)` kernel on an `ml.t3.medium` instance.
###Code
!pip install -q awswrangler
import pandas as pd
import numpy as np
import boto3
import sagemaker
sess = sagemaker.Session()
bucket = sess.default_bucket()
prefix = 'sagemaker-studio-book/chapter03'
!aws s3 cp s3://sagemaker-sample-files/datasets/tabular/synthetic/churn.txt ./
df=pd.read_csv('./churn.txt')
df['CustomerID']=df.index
df.head()
columns_with_nan = ['Account Length', 'CustServ Calls']
df2 = df.copy()
df2[columns_with_nan] = df2[columns_with_nan].mask(np.random.random(df[columns_with_nan].shape) < 5e-2)
df2.head()
customer_columns = ['CustomerID', 'State', 'Area Code', 'Phone']
account_columns = ['CustomerID', 'Account Length', "Int'l Plan", 'VMail Plan', 'Churn?']
utility_columns = ['CustomerID', 'VMail Message', 'Day Mins', 'Day Calls', 'Day Charge',
'Eve Mins', 'Eve Calls', 'Eve Charge', 'Night Mins', 'Night Calls',
'Night Charge', 'Intl Mins', 'Intl Calls', 'Intl Charge', 'CustServ Calls']
# Optional cleanup: drop the database to start over (requires `wr` and `db_name`, defined below)
# wr.catalog.delete_database(db_name)
import awswrangler as wr
databases = wr.catalog.databases()
print(databases)
db_name = 'telco_db'
if db_name not in databases.values:
wr.catalog.create_database(db_name, description = 'Sample DB for telco churn dataset')
print(wr.catalog.databases())
else:
print(f"Database {db_name} already exists")
dfs = []
suffix = ['customer_info', 'account_info', 'utility']
for i, columns in enumerate([customer_columns, account_columns, utility_columns]):
df_tmp = df2[columns]
print(columns)
df_tmp.head()
dfs.append(df_tmp)
fname = 'telco_churn_%s' % suffix[i]
outputpath = f's3://{bucket}/{prefix}/data/{fname}'
print(outputpath)
if i > 1:
wr.s3.to_csv(
df=df_tmp,
path=outputpath,
dataset=True,
database=db_name, # Athena/Glue database
table=fname, # Athena/Glue table
index=False,
mode='overwrite')
else:
wr.s3.to_csv(
df=df_tmp,
path=f'{outputpath}.csv',
index=False)
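# Sanity check (a sketch, assuming the utility table was registered in the Glue catalog above):
# wr.athena.read_sql_query('SELECT * FROM telco_churn_utility LIMIT 5', database=db_name)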
###Output
_____no_output_____ |
Part 3 - Classification/Section 20 - Naive Bayes/ML_AZ_Sec20_NaiveBayes.ipynb | ###Markdown
Naive Bayes1. Apply the Naive Bayes classification algorithm to predict a category2. **Input** = Social_Network_Ads.csv Importing the libraries
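Naive Bayes assumes the features are conditionally independent given the class; the Gaussian variant used below scores $P(y \mid x) \propto P(y) \prod_j \mathcal{N}(x_j;\, \mu_{j,y}, \sigma^2_{j,y})$ and predicts the class with the highest score.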
###Code
import numpy as np
import matplotlib.pyplot as plt #graphs
import pandas as pd
###Output
_____no_output_____
###Markdown
Importing the dataset
###Code
dataset = pd.read_csv('Social_Network_Ads.csv')
x = dataset.iloc[:, :-1].values # features: every column except the last (the target)
y = dataset.iloc[:,-1].values
###Output
_____no_output_____
###Markdown
Splitting the dataset into the Training set and Test set
###Code
# Split the dataset into Training and Test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = 0.25, random_state = 0)
###Output
_____no_output_____
###Markdown
Feature Scaling
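Standardization rescales each feature to $x' = (x - \mu) / \sigma$, where $\mu$ and $\sigma$ are estimated on the training set only; the test set must be transformed with those same statistics.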
###Code
print(X_train)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test) # transform only: reuse the scaler fitted on the training set to avoid leakage
print(X_train)
###Output
[[ 0.58164944 -0.88670699]
[-0.60673761 1.46173768]
[-0.01254409 -0.5677824 ]
[-0.60673761 1.89663484]
[ 1.37390747 -1.40858358]
[ 1.47293972 0.99784738]
[ 0.08648817 -0.79972756]
[-0.01254409 -0.24885782]
[-0.21060859 -0.5677824 ]
[-0.21060859 -0.19087153]
[-0.30964085 -1.29261101]
[-0.30964085 -0.5677824 ]
[ 0.38358493 0.09905991]
[ 0.8787462 -0.59677555]
[ 2.06713324 -1.17663843]
[ 1.07681071 -0.13288524]
[ 0.68068169 1.78066227]
[-0.70576986 0.56295021]
[ 0.77971394 0.35999821]
[ 0.8787462 -0.53878926]
[-1.20093113 -1.58254245]
[ 2.1661655 0.93986109]
[-0.01254409 1.22979253]
[ 0.18552042 1.08482681]
[ 0.38358493 -0.48080297]
[-0.30964085 -0.30684411]
[ 0.97777845 -0.8287207 ]
[ 0.97777845 1.8676417 ]
[-0.01254409 1.25878567]
[-0.90383437 2.27354572]
[-1.20093113 -1.58254245]
[ 2.1661655 -0.79972756]
[-1.39899564 -1.46656987]
[ 0.38358493 2.30253886]
[ 0.77971394 0.76590222]
[-1.00286662 -0.30684411]
[ 0.08648817 0.76590222]
[-1.00286662 0.56295021]
[ 0.28455268 0.07006676]
[ 0.68068169 -1.26361786]
[-0.50770535 -0.01691267]
[-1.79512465 0.35999821]
[-0.70576986 0.12805305]
[ 0.38358493 0.30201192]
[-0.30964085 0.07006676]
[-0.50770535 2.30253886]
[ 0.18552042 0.04107362]
[ 1.27487521 2.21555943]
[ 0.77971394 0.27301877]
[-0.30964085 0.1570462 ]
[-0.01254409 -0.53878926]
[-0.21060859 0.1570462 ]
[-0.11157634 0.24402563]
[-0.01254409 -0.24885782]
[ 2.1661655 1.11381995]
[-1.79512465 0.35999821]
[ 1.86906873 0.12805305]
[ 0.38358493 -0.13288524]
[-1.20093113 0.30201192]
[ 0.77971394 1.37475825]
[-0.30964085 -0.24885782]
[-1.6960924 -0.04590581]
[-1.00286662 -0.74174127]
[ 0.28455268 0.50496393]
[-0.11157634 -1.06066585]
[-1.10189888 0.59194336]
[ 0.08648817 -0.79972756]
[-1.00286662 1.54871711]
[-0.70576986 1.40375139]
[-1.29996338 0.50496393]
[-0.30964085 0.04107362]
[-0.11157634 0.01208048]
[-0.30964085 -0.88670699]
[ 0.8787462 -1.3505973 ]
[-0.30964085 2.24455257]
[ 0.97777845 1.98361427]
[-1.20093113 0.47597078]
[-1.29996338 0.27301877]
[ 1.37390747 1.98361427]
[ 1.27487521 -1.3505973 ]
[-0.30964085 -0.27785096]
[-0.50770535 1.25878567]
[-0.80480212 1.08482681]
[ 0.97777845 -1.06066585]
[ 0.28455268 0.30201192]
[ 0.97777845 0.76590222]
[-0.70576986 -1.49556302]
[-0.70576986 0.04107362]
[ 0.48261718 1.72267598]
[ 2.06713324 0.18603934]
[-1.99318916 -0.74174127]
[-0.21060859 1.40375139]
[ 0.38358493 0.59194336]
[ 0.8787462 -1.14764529]
[-1.20093113 -0.77073441]
[ 0.18552042 0.24402563]
[ 0.77971394 -0.30684411]
[ 2.06713324 -0.79972756]
[ 0.77971394 0.12805305]
[-0.30964085 0.6209365 ]
[-1.00286662 -0.30684411]
[ 0.18552042 -0.3648304 ]
[ 2.06713324 2.12857999]
[ 1.86906873 -1.26361786]
[ 1.37390747 -0.91570013]
[ 0.8787462 1.25878567]
[ 1.47293972 2.12857999]
[-0.30964085 -1.23462472]
[ 1.96810099 0.91086794]
[ 0.68068169 -0.71274813]
[-1.49802789 0.35999821]
[ 0.77971394 -1.3505973 ]
[ 0.38358493 -0.13288524]
[-1.00286662 0.41798449]
[-0.01254409 -0.30684411]
[-1.20093113 0.41798449]
[-0.90383437 -1.20563157]
[-0.11157634 0.04107362]
[-1.59706014 -0.42281668]
[ 0.97777845 -1.00267957]
[ 1.07681071 -1.20563157]
[-0.01254409 -0.13288524]
[-1.10189888 -1.52455616]
[ 0.77971394 -1.20563157]
[ 0.97777845 2.07059371]
[-1.20093113 -1.52455616]
[-0.30964085 0.79489537]
[ 0.08648817 -0.30684411]
[-1.39899564 -1.23462472]
[-0.60673761 -1.49556302]
[ 0.77971394 0.53395707]
[-0.30964085 -0.33583725]
[ 1.77003648 -0.27785096]
[ 0.8787462 -1.03167271]
[ 0.18552042 0.07006676]
[-0.60673761 0.8818748 ]
[-1.89415691 -1.40858358]
[-1.29996338 0.59194336]
[-0.30964085 0.53395707]
[-1.00286662 -1.089659 ]
[ 1.17584296 -1.43757673]
[ 0.18552042 -0.30684411]
[ 1.17584296 -0.74174127]
[-0.30964085 0.07006676]
[ 0.18552042 2.09958685]
[ 0.77971394 -1.089659 ]
[ 0.08648817 0.04107362]
[-1.79512465 0.12805305]
[-0.90383437 0.1570462 ]
[-0.70576986 0.18603934]
[ 0.8787462 -1.29261101]
[ 0.18552042 -0.24885782]
[-0.4086731 1.22979253]
[-0.01254409 0.30201192]
[ 0.38358493 0.1570462 ]
[ 0.8787462 -0.65476184]
[ 0.08648817 0.1570462 ]
[-1.89415691 -1.29261101]
[-0.11157634 0.30201192]
[-0.21060859 -0.27785096]
[ 0.28455268 -0.50979612]
[-0.21060859 1.6067034 ]
[ 0.97777845 -1.17663843]
[-0.21060859 1.63569655]
[ 1.27487521 1.8676417 ]
[-1.10189888 -0.3648304 ]
[-0.01254409 0.04107362]
[ 0.08648817 -0.24885782]
[-1.59706014 -1.23462472]
[-0.50770535 -0.27785096]
[ 0.97777845 0.12805305]
[ 1.96810099 -1.3505973 ]
[ 1.47293972 0.07006676]
[-0.60673761 1.37475825]
[ 1.57197197 0.01208048]
[-0.80480212 0.30201192]
[ 1.96810099 0.73690908]
[-1.20093113 -0.50979612]
[ 0.68068169 0.27301877]
[-1.39899564 -0.42281668]
[ 0.18552042 0.1570462 ]
[-0.50770535 -1.20563157]
[ 0.58164944 2.01260742]
[-1.59706014 -1.49556302]
[-0.50770535 -0.53878926]
[ 0.48261718 1.83864855]
[-1.39899564 -1.089659 ]
[ 0.77971394 -1.37959044]
[-0.30964085 -0.42281668]
[ 1.57197197 0.99784738]
[ 0.97777845 1.43274454]
[-0.30964085 -0.48080297]
[-0.11157634 2.15757314]
[-1.49802789 -0.1038921 ]
[-0.11157634 1.95462113]
[-0.70576986 -0.33583725]
[-0.50770535 -0.8287207 ]
[ 0.68068169 -1.37959044]
[-0.80480212 -1.58254245]
[-1.89415691 -1.46656987]
[ 1.07681071 0.12805305]
[ 0.08648817 1.51972397]
[-0.30964085 0.09905991]
[ 0.08648817 0.04107362]
[-1.39899564 -1.3505973 ]
[ 0.28455268 0.07006676]
[-0.90383437 0.38899135]
[ 1.57197197 -1.26361786]
[-0.30964085 -0.74174127]
[-0.11157634 0.1570462 ]
[-0.90383437 -0.65476184]
[-0.70576986 -0.04590581]
[ 0.38358493 -0.45180983]
[-0.80480212 1.89663484]
[ 1.37390747 1.28777882]
[ 1.17584296 -0.97368642]
[ 1.77003648 1.83864855]
[-0.90383437 -0.24885782]
[-0.80480212 0.56295021]
[-1.20093113 -1.5535493 ]
[-0.50770535 -1.11865214]
[ 0.28455268 0.07006676]
[-0.21060859 -1.06066585]
[ 1.67100423 1.6067034 ]
[ 0.97777845 1.78066227]
[ 0.28455268 0.04107362]
[-0.80480212 -0.21986468]
[-0.11157634 0.07006676]
[ 0.28455268 -0.19087153]
[ 1.96810099 -0.65476184]
[-0.80480212 1.3457651 ]
[-1.79512465 -0.59677555]
[-0.11157634 0.12805305]
[ 0.28455268 -0.30684411]
[ 1.07681071 0.56295021]
[-1.00286662 0.27301877]
[ 1.47293972 0.35999821]
[ 0.18552042 -0.3648304 ]
[ 2.1661655 -1.03167271]
[-0.30964085 1.11381995]
[-1.6960924 0.07006676]
[-0.01254409 0.04107362]
[ 0.08648817 1.05583366]
[-0.11157634 -0.3648304 ]
[-1.20093113 0.07006676]
[-0.30964085 -1.3505973 ]
[ 1.57197197 1.11381995]
[-0.80480212 -1.52455616]
[ 0.08648817 1.8676417 ]
[-0.90383437 -0.77073441]
[-0.50770535 -0.77073441]
[-0.30964085 -0.91570013]
[ 0.28455268 -0.71274813]
[ 0.28455268 0.07006676]
[ 0.08648817 1.8676417 ]
[-1.10189888 1.95462113]
[-1.6960924 -1.5535493 ]
[-1.20093113 -1.089659 ]
[-0.70576986 -0.1038921 ]
[ 0.08648817 0.09905991]
[ 0.28455268 0.27301877]
[ 0.8787462 -0.5677824 ]
[ 0.28455268 -1.14764529]
[-0.11157634 0.67892279]
[ 2.1661655 -0.68375498]
[-1.29996338 -1.37959044]
[-1.00286662 -0.94469328]
[-0.01254409 -0.42281668]
[-0.21060859 -0.45180983]
[-1.79512465 -0.97368642]
[ 1.77003648 0.99784738]
[ 0.18552042 -0.3648304 ]
[ 0.38358493 1.11381995]
[-1.79512465 -1.3505973 ]
[ 0.18552042 -0.13288524]
[ 0.8787462 -1.43757673]
[-1.99318916 0.47597078]
[-0.30964085 0.27301877]
[ 1.86906873 -1.06066585]
[-0.4086731 0.07006676]
[ 1.07681071 -0.88670699]
[-1.10189888 -1.11865214]
[-1.89415691 0.01208048]
[ 0.08648817 0.27301877]
[-1.20093113 0.33100506]
[-1.29996338 0.30201192]
[-1.00286662 0.44697764]
[ 1.67100423 -0.88670699]
[ 1.17584296 0.53395707]
[ 1.07681071 0.53395707]
[ 1.37390747 2.331532 ]
[-0.30964085 -0.13288524]
[ 0.38358493 -0.45180983]
[-0.4086731 -0.77073441]
[-0.11157634 -0.50979612]
[ 0.97777845 -1.14764529]
[-0.90383437 -0.77073441]
[-0.21060859 -0.50979612]
[-1.10189888 -0.45180983]
[-1.20093113 1.40375139]]
###Markdown
Training the Naive Bayes model on the Training set
###Code
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
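# GaussianNB also exposes per-class probabilities, e.g. (a quick sanity-check sketch):
# classifier.predict_proba(X_test[:5])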
###Output
_____no_output_____
###Markdown
Predicting a new result
###Code
print(X_test[[7]])
classifier.predict(X_test[[7]]) # predict the class of the same single (already scaled) test row
###Output
_____no_output_____
###Markdown
Predicting the Test set results
###Code
y_pred = classifier.predict(X_test)
np.set_printoptions(precision=2)
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
###Output
[[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[1 1]
[0 0]
[1 0]
[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[1 0]
[0 0]
[0 0]
[1 1]
[0 0]
[0 0]
[1 1]
[0 0]
[1 1]
[0 0]
[1 1]
[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[0 1]
[1 1]
[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[1 1]
[0 0]
[0 0]
[0 0]
[0 0]
[1 1]
[0 0]
[0 0]
[1 1]
[0 0]
[1 1]
[1 1]
[0 0]
[0 0]
[1 0]
[1 1]
[0 1]
[0 0]
[0 0]
[1 1]
[0 0]
[0 0]
[1 1]
[0 0]
[0 1]
[0 0]
[1 1]
[0 0]
[0 0]
[0 0]
[0 0]
[1 1]
[0 0]
[0 0]
[1 1]
[0 0]
[0 0]
[0 0]
[0 0]
[1 1]
[1 1]
[1 1]
[1 0]
[0 0]
[0 0]
[1 1]
[0 1]
[0 0]
[1 1]
[1 1]
[0 0]
[0 0]
[1 1]
[0 0]
[0 0]
[0 0]
[0 1]
[0 0]
[1 1]
[1 1]
[1 1]]
###Markdown
Making the Confusion MatrixThe confusion matrix shows how many predictions the model got right and wrong on the test set
###Code
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test,y_pred)
print(cm)
# rows = actual class, columns = predicted class:
# [[TN FP]
#  [FN TP]]
acc = accuracy_score(y_test,y_pred)
print(acc)
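# Worked reading of the matrix above: accuracy = (64 + 27) / (64 + 4 + 5 + 27) = 0.91
# A sketch of precision/recall derived directly from cm:
# tn, fp, fn, tp = cm.ravel()
# precision, recall = tp / (tp + fp), tp / (tp + fn)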
###Output
[[64 4]
[ 5 27]]
0.91
###Markdown
Visualising the Training set results
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = sc.inverse_transform(X_train), y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10 , stop = X_set[:, 0].max() + 10, step = 0.25),
np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.25))
plt.contourf(X1, X2, classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i,j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red','green'))(i), label = j)
plt.title('Naive Bayes (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
###Output
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
###Markdown
Visualising the Test set results
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = sc.inverse_transform(X_test), y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10 , stop = X_set[:, 0].max() + 10, step = 0.35),
np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.35))
plt.contourf(X1, X2, classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i,j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red','green'))(i), label = j)
plt.title('Naive Bayes (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
###Output
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
|
Hawkes_process_1126.ipynb | ###Markdown
Hawkes equation $$ \lambda(t, x, y) = (1-p)\,\mu(x,y) + \frac{p\,a\,b}{\pi} \sum_{i:\, t_i < t} \exp\!\left(-a\,(t-t_i) - b\left((x-x_i)^2 + (y-y_i)^2\right)\right) $$
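For fitting, the classical route is to maximize the point-process log-likelihood (a sketch; the commented-out `f_logsum` line below appears to target its first term): $$ \log L = \sum_i \log \lambda(t_i, x_i, y_i) - \int_0^T \!\!\int_S \lambda(t, x, y)\, dx\, dy\, dt $$ This notebook instead fits $p, a, b$ by least squares with `scipy.optimize.curve_fit`.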
###Code
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
def func(X, p, a, b):
t,x,y = X
A=np.zeros(len(t))
for i in range(1,len(t)):
A[i]= np.exp(-a*(t[i]-t[i-1])\
-b*((x[i]-x[i-1])**2+(y[i]-y[i-1])**2))+ A[i-1]
# f_logsum=np.sum(np.log(p+(p*q*b/math.pi)*A))
# lamda=(1-p)+((p)*a*b/math.pi)*np.cumsum(A)
lamda=(1-p)+((p)*a*b/math.pi)*A
return lamda
date=dff['Date'][0:len(t)]
popt, pcov = curve_fit(func,(t,y,x),Opt,bounds=(0,[1,100,100]))
# np.shape(t),np.shape(y),np.shape(x)
print(popt,pcov)
p,a,b=popt
plt.plot(date,Opt,label='Observed')
M=func((t,x,y),p,a,b)
plt.plot(date,M,label='Fitted')
plt.title('Fitted plot')
plt.xlabel('Time')
plt.ylabel('Output')
plt.legend()
plt.xticks(np.arange(0,500,90))
plt.show()
plt.imshow(pcov);plt.colorbar()
plt.title('Corelation of cof. plot')
# find the R-square value between model value and ASF cases
Y_model=np.array(M.astype(int))
y_obj=np.array(Opt.astype(int))
corr_matrix = np.corrcoef(y_obj, Y_model)
corr = corr_matrix[0,1]
R_sq = corr**2
print(f'The R-square value from numpy model between model and observed data is {round(R_sq,4)}')
from sklearn.metrics import r2_score
R_sq=r2_score(y_obj, Y_model)
print(f'The R-square value from sklearn model between model and observed data is {round(R_sq,4)}')
#Predicted time and new coordinates
np.random.seed(0)
t0,x0,y0,opt0=t[-1],x[-1],y[-1],Opt[-1]
tp=[]
for tt in range(1,301): # let's predict 300 days ahead
tp.append(t[-1]+tt)
# xp=np.arange(min(x),max(x),10)
xp=np.random.choice(np.arange(min(x),max(x),0.00005) ,10000,replace=False) # 10000 random x points between min and max of x
yp=np.random.choice(np.arange(min(y),max(y),0.00005) ,10000,replace=False)
plt.scatter(xp,yp)
plt.title('possible excitation sites')
Time=[t0];x_cord=[x0];y_cord=[y0];cnt=[opt0] # simulation time, coordinates, and cnt as counts
print(t0,x0,y0,opt0, Time,cnt[-1],x_cord,y_cord)
print(tp[0],Time[-1])
Time[-1],tp[0], p,a,b
# cntt=[1687]
# for j in range(10):
# # print(tp[j],Time[-1])
# cnt=[1687]
# for i in range(1500):
# # cnt[-1]=(1-p)+(np.exp(-a*(tp[j]-Time[-1])\
# # -b*((xp[i]-x_cord[-1])**2+(yp[i]-y_cord[-1])**2)))*((p*a*b/math.pi))+ cnt[-1]
# cnt[-1]=(np.exp(-a*(tp[j]-Time[-1])\
# -b*((xp[i]-x_cord[-1])**2+(yp[i]-y_cord[-1])**2)))*((p*a*b/math.pi))+ cnt[-1]
# cnt.append(cnt[-1])
# if cnt[-1]>1687:
# print(cnt[-1])
(np.exp(-a*(0)-b*((127 -x_cord[-1])**2+(37-y_cord[-1])**2)))*((p*a*b/math.pi))
cntt=[1687] # current infected numbers
for itrn in range(10): # let's check the next 10 days
for tm in tp: # check for each time period
for xt,yt in zip(xp,yp):
extn=(np.exp(-a*(tm-Time[-1])\
-b*((xt-x_cord[-1])**2+(yt-y_cord[-1])**2)))*((p*a*b/math.pi))
if np.round(extn)>=1:
print(extn,tm, xt, yt)
# question: what happens if two outbreaks occur at the same point at the same time?
# assumption: at most one outbreak per point at a given time
# def predict_time_n_cord(X, p, a, b,t0,x0,y0,opt0): # (x) time, coordiinates, (p,a,b) constants, predict_start_time, opt0: current outbreak number
# t,x,y = X
Time=[t0];x_cord=[x0];y_cord=[y0];count=[opt0]
for j in range(5):
print(tp[j],Time[-1])
for i in range(0,10):
# excitation =(1-p)+(p*a*b/math.pi)*(np.exp((-a*(t[j]-time[-1])\
# -b*((x[i]-x_cord[-1])**2+(y[i]-y_cord[-1])**2))))#+count[-1]) #A[i-1] # cumulatinve value is not considered
# excitation =(1-p)+(p*a*b/math.pi)*(np.exp(-a*(tp[j]-time[-1])\
# -b*((xp[i]-x_cord[-1])**2+(yp[i]-y_cord[-1])**2)))\
# + count[-1]/(1-p)+(p*a*b/math.pi)
excitation =(np.exp(-a*(tp[j]-Time[-1])\
-b*((xp[i]-x_cord[-1])**2+(yp[i]-y_cord[-1])**2)))+count[-1]
# excitation =(p*a*b/math.pi)*(np.exp(-a*(tp[j]-time[-1])\
# -b*((xp[i]-x_cord[-1])**2+(yp[i]-y_cord[-1])**2)))
# excitation =(np.exp(-a*(tp[j]-time[-1])\
# -b*((xp[i]-x_cord[-1])**2+(yp[i]-y_cord[-1])**2)))
# print(excitation,xp[i])
count.append(count[-1]+1)
Time.append(tp[j])
# if (round(excitation)-count[-1])>=1:
# # if excitation>1:
# print(tp[j],count[-1],time[-1])
# print('done')
# # print(excitation,tp[j],xp[i],yp[i])
# time.append(tp[j])
# x_cord.append(xp[i])
# y_cord.append(yp[i])
# count.append(count[-1]+1)
# print('what is happening ')
# f_logsum=np.sum(np.log(p+(p*q*b/math.pi)*A))
# lamda=(1-p)+((p)*a*b/math.pi)*A
# return lamda
##################################### ALL together ########################
# setting the new function with new approach
def func(X, p, a, b):
t,x,y = X
A=[0]
for i in range(1,len(t)):
xxx=(np.exp((-a*(t[i]-t[i-1])\
-b*((x[i]-x[i-1])**2+(y[i]-y[i-1])**2))))+A[i-1]
A.append(xxx)
lamda=(1-p)+(p*a*b/math.pi)*np.array(A)
return lamda
popt, pcov = curve_fit(func,(t,y,x),Opt,bounds=(0,[1,100,100]))
p,a,b=popt
print(popt)
M=func((t,x,y),p,a,b)
p,a,b=popt
plt.plot(date,Opt,label='Observed')
M=func((t,x,y),p,a,b) # evaluate the fitted model on the full series
plt.plot(date,M,label='Fitted')
plt.title('Fitted plot')
plt.xlabel('Time')
plt.ylabel('Output')
plt.legend()
plt.xticks(np.arange(0,500,90))
plt.show()
# find the R-square value between model value and ASF cases
Y_model=np.array(M)
y_obj=np.array(Opt.astype(int))
corr_matrix = np.corrcoef(y_obj, Y_model)
corr = corr_matrix[0,1]
R_sq = corr**2
print(f'The R-square value from numpy model between model and observed data is {round(R_sq,4)}')
# let's check only the first 1000 data points
t[:1000];y[:1000];x[:1000]
popt, pcov = curve_fit(func,(t,y,x),Opt,bounds=(0,[1,100,100]))
popt
M=func((t[:1000],x[:1000],y[:1000]),p,a,b) # lets check first 1000 items
plt.plot(date[:1000],M,label='Fitted') # lets plot first 1000 items
plt.plot(date[:1000],Opt[:1000],label='Observed')
plt.xticks(np.arange(0,500,90))
plt.show()
M[:20]*(p*a*b/math.pi)+10
p,a,b=popt
def func(X, p, a, b):
t,x,y = X
A=np.zeros(len(t))
for i in range(1,len(t)):
A[i-1]= np.exp((-a*(t[i]-t[i-1])\
-b*((x[i]-x[i-1])**2+(y[i]-y[i-1])**2)))+A[i-1]
# print(round(A[i-1]))
# f_logsum=np.sum(np.log(p+(p*q*b/math.pi)*A))
# lamda=(1-p)+((p)*a*b/math.pi)*np.cumsum(A) #if saved as A[i-1]
lamda=(1-p)+((p)*a*b/math.pi)*(A) #if saved as A[i]
lamda.astype(int)
print(np.sum(A),len(A))
plt.plot(A[:10])
plt.show()
print((A[:10]))
return lamda
M=func((t,x,y),p,a,b)
print(max(M))
plt.plot(M)
print(max(M),min(M))
plt.plot(func((tp,xp,yp),p,a,b))
###Output
_____no_output_____ |
helper.ipynb | ###Markdown
Copy
###Code
import os
target_dir = "/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/sim_SE_results_cuts"
for dirpath, dirnames, filenames in os.walk("./sim_denoised/models_sim_2_58"):
for dirname in dirnames:
if dirname=="DeepFL":
full_dirpath = os.path.join(dirpath, dirname)
env = dirpath.split("/")[-1]
full_target_dirpath = os.path.join(target_dir, env)
print(full_dirpath, full_target_dirpath)
# !mv $full_dirpath $new_full_dirpath
!cp -rf $full_dirpath $full_target_dirpath
import os
target_dir = "/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts"
for dirpath, dirnames, filenames in os.walk(target_dir):
if not dirpath.split("/")[-1]=="DeepFL-Pre":
continue
for filename in filenames:
full_dirpath = os.path.join(dirpath, filename)
new_full_dirpath = os.path.join(dirpath, filename.replace("DeepFL", "DeepFL-Pre"))
print(full_dirpath, new_full_dirpath)
!mv $full_dirpath $new_full_dirpath
# !cp -rf $new_full_dirpath $full_target_dirpath
###Output
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_office1/DeepFL-Pre/f10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00026.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00026.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00027.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00027.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office1/DeepFL-Pre/m10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00026.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00026.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00027.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00027.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom2/DeepFL-Pre/m10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00026.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00026.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00027.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00027.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00026.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00026.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00027.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00027.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office2/DeepFL-Pre/f10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00026.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00026.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00027.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00027.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_iphone_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00026.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00026.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00027.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00027.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_confroom2/DeepFL-Pre/f10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00026.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00026.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00027.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00027.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_balcony1/DeepFL-Pre/m10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00026.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00026.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00027.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00027.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_bedroom1/DeepFL-Pre/m10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00026.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00026.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00027.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00027.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_office2/DeepFL-Pre/m10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_office1/DeepFL-Pre/f10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_bedroom1/DeepFL-Pre/f10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_balcony1/DeepFL-Pre/f10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipad_livingroom1/DeepFL-Pre/f10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00026.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00026.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00027.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00027.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipadflat_office1/DeepFL-Pre/m10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/f10_script5_ipadflat_confroom1/DeepFL-Pre/f10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00026.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00026.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00027.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00027.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_ipad_confroom1/DeepFL-Pre/m10_DeepFL-Pre_00016.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00017.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00017.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00026.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00026.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00008.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00008.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00000.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00000.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00022.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00022.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00004.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00004.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00013.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00013.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00019.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00019.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00020.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00020.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00002.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00002.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00011.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00011.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00024.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00024.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00006.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00006.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00015.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00015.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00018.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00018.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00027.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00027.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00009.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00009.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00001.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00001.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00010.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00010.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00023.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00023.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00005.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00005.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00014.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00014.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00021.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00021.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00003.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00003.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00012.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00012.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00025.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00025.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00007.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00007.wav
/trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL_00016.wav /trainman-mount/trainman-storage-420a420f-b7a2-4445-abca-0081fc7108ca/daps_SE_results_cuts/m10_script5_iphone_livingroom1/DeepFL-Pre/m10_DeepFL-Pre_00016.wav
###Markdown
Playing with the dictionary to help in Wordle[Wordle](https://www.powerlanguage.co.uk/wordle/) is a fun little game. Besides being fun, it offers the opportunity to play a bit with Python and Jupyter, both of which I am trying to learn. First, let's load a word list from the computer's dictionary.
###Code
with open("/usr/share/dict/words", "r") as f:  # the system word list on most Unix-like systems
    word_list = f.readlines()
word_list[:5]
###Output
_____no_output_____
###Markdown
Then let's clean up the word list a bit, keeping only the five-letter words.
###Code
words = [x.strip().lower() for x in word_list if len(x.strip()) == 5]
###Output
_____no_output_____
###Markdown
Count how many times each letter appears:
###Code
from collections import defaultdict
freq = defaultdict(int) # a dictionary with default value 0 for all keys
for w in words:
for c in w:
freq[c] += 1
###Output
_____no_output_____
###Markdown
Then compute each letter's relative frequency (strictly speaking, we could skip this step, since dividing by a constant does not change the ranking).
###Code
total = sum(freq.values())
for k, v in freq.items():
freq[k] = freq[k]/total
###Output
_____no_output_____
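###Markdown
Out of curiosity, we can peek at the most frequent letters -- a quick illustrative check (the exact ranking depends on your system's word list).
###Code
sorted(freq.items(), key=lambda kv: kv[1], reverse=True)[:5]
###Output
_____no_output_____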
###Markdown
I now define a function to compute the score of a word. A word scores higher when the unique letters it contains appear more frequently across dictionary entries.
###Code
def score(w):
s = 0
for letter in set(w):
s += freq[letter]
return s
sc = [ (w, score(w)) for w in words] # score each element of the list
sc.sort(key=lambda x:x[1], reverse=True) # sort the list according to score
sc[:5]
###Output
_____no_output_____
###Markdown
According to this metric, the best word to start the game with should be the first one. Since we have a sorted list, we take the first element (extracted by the first `[0]`) and the first component of the pair (the second `[0]`), since we do not really care about the score itself.
###Code
sc[0][0]
###Output
_____no_output_____
###Markdown
'arose' is a good candidate. I would have expected 'arise' to rank higher. Furthermore, I like starting words that end with 's' (to learn whether the word we are looking for is a plural), but so far we did not tell the score function that we like words ending in 's', so that is my fault really. My personal favourite starting word would be 'aries', but it is not in the game's dictionary; the program gets pretty close nonetheless. Here I run out of steam a bit, but I still defined what a not-bad word is (i.e., one that contains no letters we know are absent from the solution). The bad letters are just a constant string that one updates as the game progresses.
###Code
def not_bad(w, bad):
    # True when the word contains none of the letters ruled out so far
    return all(c not in bad for c in w)
def good(w, g):
    # True when the word contains every letter known to be in the solution
    return all(c in w for c in g)
bad_letters = ""   # letters we know are absent from the solution
good_letters = ""  # letters we know appear in the solution
fsc = [w for (w, _) in sc if not_bad(w, bad_letters) and good(w, good_letters)]
###Output
_____no_output_____
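###Markdown
A quick sanity check of the two filters (the letters here are purely illustrative): with 'xqz' ruled out and 'a' confirmed, 'arose' survives while 'zebra' is rejected.
###Code
# Illustrative check: pretend 'x', 'q' and 'z' were ruled out,
# and 'a' was confirmed to appear somewhere in the solution.
assert not_bad("arose", "xqz") and good("arose", "a")
assert not not_bad("zebra", "xqz")  # contains 'z', which is ruled out
###Output
_____no_output_____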
###Markdown
Then I define a function to match words against the letters whose positions we already know. The spec is a five-character string with dots as placeholders for unknown letters and the known letters in their positions otherwise.
###Code
def match_spec(w, sp):
    # sp is a five-character spec: '.' for unknown positions, a letter otherwise
    for i in range(len(sp)):
        if sp[i] != "." and w[i] != sp[i]:
            return False
    return True
###Output
_____no_output_____
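###Markdown
Again a small sanity check with illustrative words: the spec "..o.." demands an 'o' in the third position, which 'arose' satisfies and 'arise' does not.
###Code
assert match_spec("arose", "..o..")      # 'o' in the third position
assert not match_spec("arise", "..o..")  # 'i' there instead
###Output
_____no_output_____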
###Markdown
With that we can find the words that are not bad and that match the spec, sorted by their score. The dictionary contains many obscure words that Wordle does not accept, so you should try the less weird-looking candidates first.
###Code
pattern = "....."
[w for w in fsc if match_spec(w, pattern)][:20]
###Output
_____no_output_____ |
ai-platform-unified/notebooks/unofficial/gapic/custom/showcase_custom_image_classification_online.ipynb | ###Markdown
Vertex client library: Custom training image classification model for online prediction OverviewThis tutorial demonstrates how to use the Vertex client library for Python to train and deploy a custom image classification model for online prediction. DatasetThe dataset used for this tutorial is the [CIFAR10 dataset](https://www.tensorflow.org/datasets/catalog/cifar10) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use is built into TensorFlow. The trained model predicts which of ten classes an image belongs to: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck. ObjectiveIn this tutorial, you create a custom model from a Python script in a Google prebuilt Docker container using the Vertex client library, and then make a prediction on the deployed model by sending data. You can alternatively create custom models using the `gcloud` command-line tool or online using the Google Cloud Console.The steps performed include:- Create a Vertex custom job for training a model.- Train a TensorFlow model.- Retrieve and load the model artifacts.- View the model evaluation.- Upload the model as a Vertex `Model` resource.- Deploy the `Model` resource to a serving `Endpoint` resource.- Make a prediction.- Undeploy the `Model` resource. CostsThis tutorial uses billable components of Google Cloud (GCP): * Vertex AI * Cloud Storage. Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. InstallationInstall the latest version of the Vertex client library.
###Code
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
###Output
_____no_output_____
###Markdown
Install the latest GA version of the *google-cloud-storage* library as well.
###Code
! pip3 install -U google-cloud-storage $USER_FLAG
###Output
_____no_output_____
###Markdown
Restart the kernelOnce you've installed the Vertex client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.
###Code
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Before you begin GPU runtime*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.5. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
###Code
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
###Output
_____no_output_____
###Markdown
RegionYou can also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.- Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1`You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the [Vertex locations documentation](https://cloud.google.com/vertex-ai/docs/general/locations)
###Code
REGION = "us-central1" # @param {type: "string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on the resources created, you create a timestamp for each instance session and append it to the names of the resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Authenticate your Google Cloud account**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via OAuth.**Otherwise**, follow these steps:In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.Click **Create service account**.In the **Service account name** field, enter a name, and click **Create**.In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.Click **Create**. A JSON file that contains your key downloads to your local environment.Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
###Code
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**When you submit a custom training job using the Vertex client library, you upload a Python package containing your training code to a Cloud Storage bucket. Vertex runs the code from this package. In this tutorial, Vertex also saves the trained model that results from your job in the same bucket. You can then create an `Endpoint` resource based on this output in order to serve online predictions.Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Set up variablesNext, set up some variables used throughout the tutorial. Import libraries and define constants Import Vertex client libraryImport the Vertex client library into our Python environment.
###Code
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
###Output
_____no_output_____
###Markdown
Vertex constantsSet up the following constants for Vertex:- `API_ENDPOINT`: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services.- `PARENT`: The Vertex location root path for dataset, model, job, pipeline and endpoint resources.
###Code
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
###Output
_____no_output_____
###Markdown
Hardware AcceleratorsSet the hardware accelerators (e.g., GPU), if any, for training and prediction.Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)For GPU, available accelerators include: - aip.AcceleratorType.NVIDIA_TESLA_K80 - aip.AcceleratorType.NVIDIA_TESLA_P100 - aip.AcceleratorType.NVIDIA_TESLA_P4 - aip.AcceleratorType.NVIDIA_TESLA_T4 - aip.AcceleratorType.NVIDIA_TESLA_V100Otherwise specify `(None, None)` to use a container image to run on a CPU.*Note*: TF releases before 2.3 with GPU support will fail to load the custom model in this tutorial. This is a known issue, caused by static graph ops generated in the serving function, and it is fixed in TF 2.3. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support.
###Code
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
###Output
_____no_output_____
###Markdown
Container (Docker) imageNext, we will set the Docker container images for training and prediction - TensorFlow 1.15 - `gcr.io/cloud-aiplatform/training/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.1-15:latest` - TensorFlow 2.1 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-1:latest` - TensorFlow 2.2 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-2:latest` - TensorFlow 2.3 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-3:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-3:latest` - TensorFlow 2.4 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-4:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-4:latest` - XGBoost - `gcr.io/cloud-aiplatform/training/xgboost-cpu.1-1` - Scikit-learn - `gcr.io/cloud-aiplatform/training/scikit-learn-cpu.0-23:latest` - Pytorch - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-4:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-5:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-6:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-7:latest`For the latest list, see [Pre-built containers for training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). - TensorFlow 1.15 - `gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest` - TensorFlow 2.1 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest` - TensorFlow 2.2 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest` - TensorFlow 2.3 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-3:latest` - XGBoost - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-2:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-90:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-82:latest` - Scikit-learn - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest` - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-22:latest` - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-20:latest`For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)
###Code
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
###Output
_____no_output_____
###Markdown
Machine TypeNext, set the machine type to use for training and prediction.- Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for training and prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: one of [2, 4, 8, 16, 32, 64, 96]*Note: The following is not supported for training:* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
###Code
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
###Output
_____no_output_____
###Markdown
TutorialNow you are ready to start creating your own custom model and training it on CIFAR10. Set up clientsThe Vertex client library works on a client/server model. On your side (the Python script) you create a client that sends requests to and receives responses from the Vertex server.You will use different clients in this tutorial for different steps in the workflow, so set them all up upfront.- Model Service for `Model` resources.- Endpoint Service for deployment.- Job Service for batch jobs and custom training.- Prediction Service for serving.
###Code
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_job_client():
client = aip.JobServiceClient(client_options=client_options)
return client
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["job"] = create_job_client()
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
###Output
_____no_output_____
###Markdown
Train a modelThere are two ways you can train a custom model using a container image:- **Use a Google Cloud prebuilt container**. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model.- **Use your own custom container image**. If you use your own container, the container needs to contain your code for training a custom model. Prepare your custom job specificationNow that your clients are ready, your first step is to create a Job Specification for your custom training job. The job specification will consist of the following:- `worker_pool_spec` : The specification of the type of machine(s) you will use for training and how many (single or distributed)- `python_package_spec` : The specification of the Python package to be installed with the pre-built container. Prepare your machine specificationNow define the machine specification for your custom training job. This tells Vertex what type of machine instance to provision for the training. - `machine_type`: The type of GCP instance to provision -- e.g., n1-standard-8. - `accelerator_type`: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable `TRAIN_GPU != None`, you are using a GPU; otherwise you will use a CPU. - `accelerator_count`: The number of accelerators.
###Code
if TRAIN_GPU:
machine_spec = {
"machine_type": TRAIN_COMPUTE,
"accelerator_type": TRAIN_GPU,
"accelerator_count": TRAIN_NGPU,
}
else:
machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0}
###Output
_____no_output_____
###Markdown
Prepare your disk specification(optional) Now define the disk specification for your custom training job. This tells Vertex what type and size of disk to provision in each machine instance for the training. - `boot_disk_type`: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD. - `boot_disk_size_gb`: Size of disk in GB.
###Code
DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard]
DISK_SIZE = 200 # GB
disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE}
###Output
_____no_output_____
###Markdown
Define the worker pool specificationNext, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following:- `replica_count`: The number of instances to provision of this machine type.- `machine_spec`: The hardware specification.- `disk_spec` : (optional) The disk storage specification.- `python_package`: The Python training package to install on the VM instance(s) and which Python module to invoke, along with command line arguments for the Python module.Let's dive deeper now into the Python package specification:-`executor_image_uri`: This is the Docker image which is configured for your custom training job.-`package_uris`: This is a list of the locations (URIs) of your Python training packages to install on the provisioned instance. The locations need to be in a Cloud Storage bucket. These can be either individual Python files or a zip (archive) of an entire package. In the latter case, the job service will unzip (unarchive) the contents into the Docker image.-`python_module`: The Python module (script) to invoke for running the custom training job. In this example, you will be invoking `trainer.task` -- note that you do not append the `.py` suffix.-`args`: The command line arguments to pass to the corresponding Python module. In this example, you will be setting: - `"--model-dir=" + MODEL_DIR` : The Cloud Storage location where to store the model artifacts. There are two ways to tell the training script where to save the model artifacts: - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification. - `"--epochs=" + EPOCHS`: The number of epochs for training. - `"--steps=" + STEPS`: The number of steps (batches) per epoch. - `"--distribute=" + TRAIN_STRATEGY` : The training distribution strategy to use for single or distributed training. - `"single"`: single device. - `"mirror"`: all GPU devices on a single compute instance. - `"multi"`: all GPU devices on all compute instances.
###Code
JOB_NAME = "custom_job_" + TIMESTAMP
MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME)
if not TRAIN_NGPU or TRAIN_NGPU < 2:
TRAIN_STRATEGY = "single"
else:
TRAIN_STRATEGY = "mirror"
EPOCHS = 20
STEPS = 100
DIRECT = True
if DIRECT:
CMDARGS = [
"--model-dir=" + MODEL_DIR,
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
]
else:
CMDARGS = [
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
]
worker_pool_spec = [
{
"replica_count": 1,
"machine_spec": machine_spec,
"disk_spec": disk_spec,
"python_package_spec": {
"executor_image_uri": TRAIN_IMAGE,
"package_uris": [BUCKET_NAME + "/trainer_cifar10.tar.gz"],
"python_module": "trainer.task",
"args": CMDARGS,
},
}
]
###Output
_____no_output_____
###Markdown
Assemble a job specificationNow assemble the complete description for the custom job specification:- `display_name`: The human readable name you assign to this custom job.- `job_spec`: The specification for the custom job. - `worker_pool_specs`: The specification for the machine VM instances. - `base_output_directory`: This tells the service the Cloud Storage location where to save the model artifacts (when variable `DIRECT = False`). The service will then pass the location to the training script as the environment variable `AIP_MODEL_DIR`, and the path will be of the form: `<output_uri_prefix>/model`
###Code
if DIRECT:
job_spec = {"worker_pool_specs": worker_pool_spec}
else:
job_spec = {
"worker_pool_specs": worker_pool_spec,
"base_output_directory": {"output_uri_prefix": MODEL_DIR},
}
custom_job = {"display_name": JOB_NAME, "job_spec": job_spec}
###Output
_____no_output_____
###Markdown
Examine the training package Package layoutBefore you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.- PKG-INFO- README.md- setup.cfg- setup.py- trainer - \_\_init\_\_.py - task.pyThe files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.The file `trainer/task.py` is the Python script for executing the custom training job. *Note*: when we referred to it in the worker pool specification, we replaced the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`). Package AssemblyIn the following cells, you will assemble the training package.
###Code
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
###Output
_____no_output_____
###Markdown
Task.py contentsIn the next cell, you write the contents of the training script task.py. We won't go into detail; it's just there for you to browse. In summary:- Gets the directory where to save the model artifacts from the command line (`--model-dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`.- Loads the CIFAR10 dataset from TF Datasets (tfds).- Builds a model using the TF.Keras model API.- Compiles the model (`compile()`).- Sets a training distribution strategy according to the argument `args.distribute`.- Trains the model (`fit()`) with epochs and steps according to the arguments `args.epochs` and `args.steps`.- Saves the trained model (`save(args.model_dir)`) to the specified model directory.
###Code
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for CIFAR-10
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv("AIP_MODEL_DIR"), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.01, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=10, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=200, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
print('DEVICES', device_lib.list_local_devices())
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
# Preparing dataset
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def make_datasets_unbatched():
# Scaling CIFAR10 data from (0, 255] to (0., 1.]
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255.0
return image, label
datasets, info = tfds.load(name='cifar10',
with_info=True,
as_supervised=True)
return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat()
# Build the Keras model
def build_and_compile_cnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(32, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
loss=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),
metrics=['accuracy'])
return model
# Train the model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_cnn_model()
model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)
model.save(args.model_dir)
###Output
_____no_output_____
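###Markdown
(Optional) Before packaging and submitting the job, you can smoke-test the training script locally with a tiny run -- one epoch, a few steps, saving to a temporary directory. This is only a sketch: it assumes your local environment has TensorFlow and `tensorflow_datasets` installed, and it downloads the CIFAR10 dataset. Skip it if you prefer to validate on Vertex directly.
###Code
# Optional local smoke test of trainer/task.py (assumes a local TF + tfds install).
# Uses the same command line arguments the custom job will pass.
! cd custom && python3 -m trainer.task --epochs=1 --steps=5 --distribute=single --model-dir=/tmp/cifar10_smoke_test
###Output
_____no_output_____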
###Markdown
Store training script on your Cloud Storage bucketNext, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
###Code
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_cifar10.tar.gz
###Output
_____no_output_____
###Markdown
Train the modelNow start the training of your custom training job on Vertex. Use this helper function `create_custom_job`, which takes the following parameter:-`custom_job`: The specification for the custom job.The helper function calls the job client service's `create_custom_job` method, with the following parameters:-`parent`: The Vertex location path to `Dataset`, `Model` and `Endpoint` resources.-`custom_job`: The specification for the custom job.You will display a handful of the fields returned in the `response` object, the two of most interest being:`response.name`: The Vertex fully qualified identifier assigned to this custom training job. You save this identifier for use in subsequent steps.`response.state`: The current state of the custom training job.
###Code
def create_custom_job(custom_job):
response = clients["job"].create_custom_job(parent=PARENT, custom_job=custom_job)
print("name:", response.name)
print("display_name:", response.display_name)
print("state:", response.state)
print("create_time:", response.create_time)
print("update_time:", response.update_time)
return response
response = create_custom_job(custom_job)
###Output
_____no_output_____
###Markdown
Now get the unique identifier for the custom job you created.
###Code
# The full unique ID for the custom job
job_id = response.name
# The short numeric ID for the custom job
job_short_id = job_id.split("/")[-1]
print(job_id)
###Output
_____no_output_____
###Markdown
Get information on a custom jobNext, use this helper function `get_custom_job`, which takes the following parameter:- `name`: The Vertex fully qualified identifier for the custom job.The helper function calls the job client service's`get_custom_job` method, with the following parameter:- `name`: The Vertex fully qualified identifier for the custom job.If you recall, you got the Vertex fully qualified identifier for the custom job in the `response.name` field when you called the `create_custom_job` method, and saved the identifier in the variable `job_id`.
###Code
def get_custom_job(name, silent=False):
response = clients["job"].get_custom_job(name=name)
if silent:
return response
print("name:", response.name)
print("display_name:", response.display_name)
print("state:", response.state)
print("create_time:", response.create_time)
print("update_time:", response.update_time)
return response
response = get_custom_job(job_id)
###Output
_____no_output_____
###Markdown
DeploymentTraining the above model may take upwards of 20 minutes.Once your model is done training, you can calculate the actual training time by subtracting the job's start time from its end time; the polling loop below approximates this with `update_time - create_time`. For your model, we will need to know the location of the saved model, which the Python script saved in your Cloud Storage bucket at `MODEL_DIR + '/saved_model.pb'`.
###Code
while True:
response = get_custom_job(job_id, True)
if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_path_to_deploy = None
if response.state == aip.JobState.JOB_STATE_FAILED:
break
else:
if not DIRECT:
MODEL_DIR = MODEL_DIR + "/model"
model_path_to_deploy = MODEL_DIR
print("Training Time:", response.update_time - response.create_time)
break
time.sleep(60)
print("model_to_deploy:", model_path_to_deploy)
###Output
_____no_output_____
###Markdown
Load the saved modelYour model is stored in TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model and make a prediction.To load it, you use the `tf.keras.models.load_model()` method, passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`.
###Code
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_DIR)
###Output
_____no_output_____
###Markdown
Evaluate the modelNow find out how good the model is. Load evaluation dataYou will load the CIFAR10 test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This will return the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the image data, and the corresponding labels.You don't need the training data, which is why we load it as `(_, _)`.Before you can run the data through evaluation, you need to preprocess it:x_test:1. Normalize (rescale) the pixel data by dividing each pixel by 255. This will replace each single-byte integer pixel with a 32-bit floating point number between 0 and 1.y_test:2. The labels are currently scalar (sparse). If you look back at the `compile()` step in the `trainer/task.py` script, you will find that it was compiled for sparse labels, so we don't need to do anything more.
###Code
import numpy as np
from tensorflow.keras.datasets import cifar10
(_, _), (x_test, y_test) = cifar10.load_data()
x_test = (x_test / 255.0).astype(np.float32)
print(x_test.shape, y_test.shape)
###Output
_____no_output_____
###Markdown
Perform the model evaluationNow evaluate how well the model in the custom job did.
###Code
model.evaluate(x_test, y_test)
###Output
_____no_output_____
###Markdown
Upload the model for servingNext, you will upload your TF.Keras model from the custom job to the Vertex `Model` service, which will create a Vertex `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex, your serving function ensures that the data is decoded on the model server before it is passed as input to your model. How does the serving function workWhen you send a request to an online prediction server, the request is received by an HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`.The serving function consists of two parts:- `preprocessing function`: - Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph). - Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc.- `post-processing function`: - Converts the model output to the format expected by the receiving application -- e.g., compresses the output. - Packages the output for the receiving application -- e.g., adds headings, wraps it in a JSON object, etc.Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.One thing to consider when building serving functions for TF.Keras models is that they run as static graphs. That means you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported. Serving function for image dataTo pass images to the prediction service, you encode the compressed (e.g., JPEG) image bytes into base 64 -- which makes the content safe from modification while transmitting binary data over the network. Since this deployed model expects input data as raw (uncompressed) bytes, you need to ensure that the base 64 encoded data gets converted back to raw bytes before it is passed as input to the deployed model.To resolve this, define a serving function (`serving_fn`) and attach it to the model as a preprocessing step. Add a `@tf.function` decorator so the serving function is fused to the underlying model (instead of upstream on a CPU).When you send a prediction or explanation request, the content of the request is base 64 decoded into a Tensorflow string (`tf.string`), which is passed to the serving function (`serving_fn`). The serving function preprocesses the `tf.string` into raw (uncompressed) numpy bytes (`preprocess_fn`) to match the input requirements of the model:- `io.decode_jpeg` - Decompresses the JPG image, which is returned as a Tensorflow tensor with three channels (RGB).- `image.convert_image_dtype` - Changes integer pixel values to float 32 and rescales the pixel data to be between 0 and 1, so no extra division by 255 is needed afterwards.- `image.resize` - Resizes the image to match the input shape for the model.At this point, the data can be passed to the model (`m_call`).
###Code
CONCRETE_INPUT = "numpy_inputs"
def _preprocess(bytes_input):
    decoded = tf.io.decode_jpeg(bytes_input, channels=3)
    # convert_image_dtype already rescales uint8 [0, 255] to float32 [0, 1],
    # so a further division by 255 would double-scale the pixel values
    decoded = tf.image.convert_image_dtype(decoded, tf.float32)
    resized = tf.image.resize(decoded, size=(32, 32))
    return resized
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
decoded_images = tf.map_fn(
_preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
)
return {
CONCRETE_INPUT: decoded_images
} # User needs to make sure the key matches model's input
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def serving_fn(bytes_inputs):
images = preprocess_fn(bytes_inputs)
prob = m_call(**images)
return prob
m_call = tf.function(model.call).get_concrete_function(
[tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)]
)
tf.saved_model.save(
model, model_path_to_deploy, signatures={"serving_default": serving_fn}
)
###Output
_____no_output_____
###Markdown
Get the serving function signatureYou can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.For your purpose, you need the signature of the serving function. Why? Well, when we send our data for prediction as an HTTP request packet, the image data is base64 encoded, and our TF.Keras model takes numpy input. Your serving function will do the conversion from base64 to a numpy array.When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
###Code
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
###Output
_____no_output_____
###Markdown
Upload the modelUse this helper function `upload_model` to upload your model, stored in SavedModel format, up to the `Model` service, which will instantiate a Vertex `Model` resource instance for your model. Once you've done that, you can use the `Model` resource instance in the same way as any other Vertex `Model` resource instance, such as deploying to an `Endpoint` resource for serving predictions.The helper function takes the following parameters:- `display_name`: A human readable name for the `Endpoint` service.- `image_uri`: The container image for the model deployment.- `model_uri`: The Cloud Storage path to our SavedModel artifact. For this tutorial, this is the Cloud Storage location where the `trainer/task.py` saved the model artifacts, which we specified in the variable `MODEL_DIR`.The helper function calls the `Model` client service's method `upload_model`, which takes the following parameters:- `parent`: The Vertex location root path for `Dataset`, `Model` and `Endpoint` resources.- `model`: The specification for the Vertex `Model` resource instance.Let's now dive deeper into the Vertex model specification `model`. This is a dictionary object that consists of the following fields:- `display_name`: A human readable name for the `Model` resource.- `metadata_schema_uri`: Since your model was built without a Vertex `Dataset` resource, you will leave this blank (`''`).- `artifact_uri`: The Cloud Storage path where the model is stored in SavedModel format.- `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the `Model` resource will serve predictions. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.Uploading a model into a Vertex Model resource returns a long running operation, since it may take a few moments. You call `response.result()`, which is a synchronous call and will return when the Vertex Model resource is ready.The helper function returns the Vertex fully qualified identifier for the corresponding Vertex Model instance, `upload_model_response.model`. You will save the identifier for subsequent steps in the variable `model_to_deploy_id`.
###Code
IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
model = {
"display_name": display_name,
"metadata_schema_uri": "",
"artifact_uri": model_uri,
"container_spec": {
"image_uri": image_uri,
"command": [],
"args": [],
"env": [{"name": "env_name", "value": "env_value"}],
"ports": [{"container_port": 8080}],
"predict_route": "",
"health_route": "",
},
}
response = clients["model"].upload_model(parent=PARENT, model=model)
print("Long running operation:", response.operation.name)
upload_model_response = response.result(timeout=180)
print("upload_model_response")
print(" model:", upload_model_response.model)
return upload_model_response.model
model_to_deploy_id = upload_model(
"cifar10-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)
###Output
_____no_output_____
###Markdown
Get `Model` resource informationNow let's get the model information for just your model. Use this helper function `get_model`, with the following parameter:- `name`: The Vertex unique identifier for the `Model` resource.This helper function calls the Vertex `Model` client service's method `get_model`, with the following parameter:- `name`: The Vertex unique identifier for the `Model` resource.
###Code
def get_model(name):
response = clients["model"].get_model(name=name)
print(response)
get_model(model_to_deploy_id)
###Output
_____no_output_____
###Markdown
Deploy the `Model` resourceNow deploy the trained Vertex custom `Model` resource. This requires two steps:1. Create an `Endpoint` resource for deploying the `Model` resource to.2. Deploy the `Model` resource to the `Endpoint` resource. Create an `Endpoint` resourceUse this helper function `create_endpoint` to create an endpoint to deploy the model to for serving predictions, with the following parameter:- `display_name`: A human readable name for the `Endpoint` resource.The helper function uses the endpoint client service's `create_endpoint` method, which takes the following parameter:- `display_name`: A human readable name for the `Endpoint` resource.Creating an `Endpoint` resource returns a long running operation, since it may take a few moments to provision the `Endpoint` resource for serving. You call `response.result()`, which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the Vertex fully qualified identifier for the `Endpoint` resource: `response.name`.
###Code
ENDPOINT_NAME = "cifar10_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
endpoint = {"display_name": display_name}
response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
print("Long running operation:", response.operation.name)
result = response.result(timeout=300)
print("result")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" description:", result.description)
print(" labels:", result.labels)
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
return result
result = create_endpoint(ENDPOINT_NAME)
###Output
_____no_output_____
###Markdown
Now get the unique identifier for the `Endpoint` resource you created.
###Code
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
###Output
_____no_output_____
###Markdown
Compute instance scalingYou have several choices on scaling the compute instances for handling your online prediction requests:- Single Instance: The online prediction requests are processed on a single compute instance. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one.- Manual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specify. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them.- Auto Scaling: The online prediction requests are split across a scalable number of compute instances. - Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES`) number of compute instances to provision, depending on load conditions.The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request.
###Code
MIN_NODES = 1
MAX_NODES = 1
###Output
_____no_output_____
###Markdown
Deploy `Model` resource to the `Endpoint` resourceUse this helper function `deploy_model` to deploy the `Model` resource to the `Endpoint` resource you created for serving predictions, with the following parameters:- `model`: The Vertex fully qualified model identifier of the model to upload (deploy) from the training pipeline.- `deploy_model_display_name`: A human readable name for the deployed model.- `endpoint`: The Vertex fully qualified endpoint identifier to deploy the model to.The helper function calls the `Endpoint` client service's method `deploy_model`, which takes the following parameters:- `endpoint`: The Vertex fully qualified `Endpoint` resource identifier to deploy the `Model` resource to.- `deployed_model`: The requirements specification for deploying the model.- `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs. - If only one model, then specify as **{ "0": 100 }**, where "0" refers to this model being uploaded and 100 means 100% of the traffic. - If there are existing models on the endpoint, for which the traffic will be split, then use `model_id` to specify as **{ "0": percent, model_id: percent, ... }**, where `model_id` is the model id of an existing model deployed to the endpoint. The percents must add up to 100.Let's now dive deeper into the `deployed_model` parameter. This parameter is specified as a Python dictionary with the minimum required fields:- `model`: The Vertex fully qualified model identifier of the (uploaded) model to deploy.- `display_name`: A human readable name for the deployed model.- `disable_container_logging`: This disables logging of container events, such as execution failures (by default, container logging is enabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production.- `dedicated_resources`: This refers to how many compute instances (replicas) are scaled for serving prediction requests. - `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated. - `min_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`. - `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`. Traffic SplitLet's now dive deeper into the `traffic_split` parameter. This parameter is specified as a Python dictionary. This might at first be a tad confusing. Let me explain: you can deploy more than one instance of your model to an endpoint, and then set how much (percent) of the traffic goes to each instance.Why would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy to production. So in the case of traffic split, you might want to deploy v2 to the same endpoint as v1, but have it get only, say, 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision (an illustrative split dictionary is sketched after the deployment cell below). ResponseThe method returns a long running operation `response`. We will wait synchronously for the operation to complete by calling `response.result()`, which will block until the model is deployed.
If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources.
###Code
DEPLOYED_NAME = "cifar10_deployed-" + TIMESTAMP
def deploy_model(
model, deployed_model_display_name, endpoint, traffic_split={"0": 100}
):
if DEPLOY_GPU:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_type": DEPLOY_GPU,
"accelerator_count": DEPLOY_NGPU,
}
else:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_count": 0,
}
deployed_model = {
"model": model,
"display_name": deployed_model_display_name,
"dedicated_resources": {
"min_replica_count": MIN_NODES,
"max_replica_count": MAX_NODES,
"machine_spec": machine_spec,
},
"disable_container_logging": False,
}
response = clients["endpoint"].deploy_model(
endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
)
print("Long running operation:", response.operation.name)
result = response.result()
print("result")
deployed_model = result.deployed_model
print(" deployed_model")
print(" id:", deployed_model.id)
print(" model:", deployed_model.model)
print(" display_name:", deployed_model.display_name)
print(" create_time:", deployed_model.create_time)
return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
###Output
_____no_output_____
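###Markdown
For reference, the v1/v2 canary scenario described above would use a traffic split like the one below. This is purely illustrative and is not sent to the endpoint -- `existing_model_id` is a hypothetical placeholder for the id of a model already deployed there.
###Code
# Illustrative only: send 10% of traffic to the newly deployed model ("0")
# and keep 90% on a model already deployed to the endpoint.
existing_model_id = "1234567890"  # hypothetical id of an already-deployed model
canary_traffic_split = {"0": 10, existing_model_id: 90}
print(canary_traffic_split)
###Output
_____no_output_____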
###Markdown
Make an online prediction requestNow do an online prediction with your deployed model. Get test itemYou will use an example out of the test (holdout) portion of the dataset as a test item.
###Code
test_image = x_test[0]
test_label = y_test[0]
print(test_image.shape)
###Output
_____no_output_____
###Markdown
Prepare the request contentYou are going to send the CIFAR10 image as a compressed JPG image, instead of the raw uncompressed bytes:- `cv2.imwrite`: Use OpenCV to write the uncompressed image to disk as a compressed JPEG image. - Denormalize the image data from the [0,1) range back to [0,255). - Convert the 32-bit floating point values to 8-bit unsigned integers.- `tf.io.read_file`: Read the compressed JPG image back into memory as raw bytes.- `base64.b64encode`: Encode the raw bytes into a base 64 encoded string.
###Code
import base64
import cv2
cv2.imwrite("tmp.jpg", (test_image * 255).astype(np.uint8))
bytes = tf.io.read_file("tmp.jpg")
b64str = base64.b64encode(bytes.numpy()).decode("utf-8")
###Output
_____no_output_____
###Markdown
Send the prediction requestOk, now you have a test image. Use this helper function `predict_image`, which takes the following parameters:- `image`: The test image data as a numpy array.- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed.- `parameters_dict`: Additional parameters for serving.This function calls the prediction client service's `predict` method with the following parameters:- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed.- `instances`: A list of instances (encoded images) to predict.- `parameters`: Additional parameters for serving.To pass the image data to the prediction service, in the previous step you encoded the bytes into base64 -- which makes the content safe from modification when transmitting binary data over the network. You need to tell the serving binary, where your model is deployed, that the content has been base64 encoded, so that it can decode it on the other end.Each instance in the prediction request is a dictionary entry of the form: {serving_input: {'b64': content}}- `serving_input`: the name of the input layer of the underlying model's serving function.- `'b64'`: A key that indicates the content is base64 encoded.- `content`: The compressed JPG image bytes as a base64 encoded string.Since the `predict()` service can take multiple images (instances), you will send your single image as a list of one image. As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the `predict()` service.The `response` object returns a list, where each element in the list corresponds to the corresponding image in the request. You will see in the output for each prediction:- `predictions`: Confidence level for the prediction, between 0 and 1, for each of the classes.
###Code
def predict_image(image, endpoint, parameters_dict):
# The format of each instance should conform to the deployed model's prediction input schema.
instances_list = [{serving_input: {"b64": image}}]
instances = [json_format.ParseDict(s, Value()) for s in instances_list]
response = clients["prediction"].predict(
endpoint=endpoint, instances=instances, parameters=parameters_dict
)
print("response")
print(" deployed_model_id:", response.deployed_model_id)
predictions = response.predictions
print("predictions")
for prediction in predictions:
print(" prediction:", prediction)
predict_image(b64str, endpoint_id, None)
###Output
_____no_output_____
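###Markdown
The `predictions` field holds one confidence value per class. To turn that into a readable label, you can take the argmax over the confidences. A minimal sketch, assuming you capture the prediction list from the response and use the standard CIFAR10 class ordering:
###Code
import numpy as np

# Standard CIFAR10 class ordering (index -> label).
CLASSES = ["airplane", "automobile", "bird", "cat", "deer",
           "dog", "frog", "horse", "ship", "truck"]

def interpret_prediction(prediction):
    # `prediction` is a list of 10 per-class confidences from the response.
    probs = np.asarray(prediction)
    best = int(np.argmax(probs))
    return CLASSES[best], float(probs[best])
###Output
_____no_output_____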
###Markdown
Undeploy the `Model` resourceNow undeploy your `Model` resource from the serving `Endpoint` resource. Use this helper function `undeploy_model`, which takes the following parameters:- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed to.- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` is deployed to.This function calls the endpoint client service's method `undeploy_model`, with the following parameters:- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed.- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource is deployed.- `traffic_split`: How to split traffic among the remaining deployed models on the `Endpoint` resource.Since this is the only deployed model on the `Endpoint` resource, you can simply leave `traffic_split` empty by setting it to {}.
###Code
def undeploy_model(deployed_model_id, endpoint):
response = clients["endpoint"].undeploy_model(
endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}
)
print(response)
undeploy_model(deployed_model_id, endpoint_id)
###Output
_____no_output_____
###Markdown
Cleaning upTo clean up all GCP resources used in this project, you can [delete the GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Dataset- Pipeline- Model- Endpoint- Batch Job- Custom Job- Hyperparameter Tuning Job- Cloud Storage Bucket
###Code
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
###Output
_____no_output_____
###Markdown
AI Platform (Unified) client library: Custom training image classification model for online prediction OverviewThis tutorial demonstrates how to use the AI Platform (Unified) Python client library to train and deploy a custom image classification model for online prediction. DatasetThe dataset used for this tutorial is the [CIFAR10 dataset](https://www.tensorflow.org/datasets/catalog/cifar10) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use is built into TensorFlow. The trained model predicts which of ten classes an image belongs to: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck. ObjectiveIn this tutorial, you create a custom model from a Python script in a Google prebuilt Docker container using the AI Platform (Unified) client library, and then make a prediction on the deployed model by sending data. You can alternatively create custom models using the `gcloud` command-line tool or online using the Google Cloud Console.The steps performed include:- Create an AI Platform (Unified) custom job for training a model.- Train a TensorFlow model.- Retrieve and load the model artifacts.- View the model evaluation.- Upload the model as an AI Platform (Unified) `Model` resource.- Deploy the `Model` resource to a serving `Endpoint` resource.- Make a prediction.- Undeploy the `Model` resource. CostsThis tutorial uses billable components of Google Cloud (GCP):* AI Platform (Unified)* Cloud StorageLearn about [AI Platform (Unified) pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. InstallationInstall the latest version of the AI Platform (Unified) client library.
###Code
import sys
if "google.colab" in sys.modules:
USER_FLAG = ""
else:
USER_FLAG = "--user"
! pip3 install -U google-cloud-aiplatform $USER_FLAG
###Output
_____no_output_____
###Markdown
Install the latest GA version of the *google-cloud-storage* library as well.
###Code
! pip3 install -U google-cloud-storage $USER_FLAG
###Output
_____no_output_____
###Markdown
Restart the kernelOnce you've installed the AI Platform (Unified) client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.
###Code
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Before you begin GPU runtime*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)3. [Enable the AI Platform (Unified) APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in AI Platform (Unified) Notebooks.5. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
###Code
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
###Output
_____no_output_____
###Markdown
RegionYou can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for AI Platform (Unified). We recommend that you choose the region closest to you.- Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1`You may not use a multi-regional bucket for training with AI Platform (Unified). Not all regions provide support for all AI Platform (Unified) services. For the latest support per region, see the [AI Platform (Unified) locations documentation](https://cloud.google.com/ai-platform-unified/docs/general/locations).
###Code
REGION = "us-central1" # @param {type: "string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session and append it onto the name of the resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Authenticate your Google Cloud account**If you are using AI Platform (Unified) Notebooks**, your environment is already authenticated. Skip this step.**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via OAuth.**Otherwise**, follow these steps:In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.Click **Create service account**.In the **Service account name** field, enter a name, and click **Create**.In the **Grant this service account access to project** section, click the Role drop-down list. Type "AI Platform (Unified)" into the filter box, and select **AI Platform (Unified) Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.Click **Create**. A JSON file that contains your key downloads to your local environment.Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
###Code
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on AI Platform, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**When you submit a custom training job using the AI Platform (Unified) client library, you upload a Python package containing your training code to a Cloud Storage bucket. AI Platform (Unified) runs the code from this package. In this tutorial, AI Platform (Unified) also saves the trained model that results from your job in the same bucket. You can then create an `Endpoint` resource based on this output in order to serve online predictions.Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Set up variablesNext, set up some variables used throughout the tutorial. Import libraries and define constants Import AI Platform (Unified) client libraryImport the AI Platform (Unified) client library into our Python environment.
###Code
import os
import sys
import time
import google.cloud.aiplatform_v1 as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
###Output
_____no_output_____
###Markdown
AI Platform (Unified) constantsSet up the following constants for AI Platform (Unified):- `API_ENDPOINT`: The AI Platform (Unified) API service endpoint for dataset, model, job, pipeline and endpoint services.- `PARENT`: The AI Platform (Unified) location root path for dataset, model, job, pipeline and endpoint resources.
###Code
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# AI Platform (Unified) location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
###Output
_____no_output_____
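###Markdown
You can print these constants to confirm they follow the expected patterns (`<region>-aiplatform.googleapis.com` for the service endpoint and `projects/<project>/locations/<region>` for the location root path):
###Code
print("API_ENDPOINT:", API_ENDPOINT)
print("PARENT:", PARENT)
###Output
_____no_output_____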
###Markdown
Hardware AcceleratorsSet the hardware accelerators (e.g., GPU), if any, for training and prediction.Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)For GPU, available accelerators include: - aip.AcceleratorType.NVIDIA_TESLA_K80 - aip.AcceleratorType.NVIDIA_TESLA_P100 - aip.AcceleratorType.NVIDIA_TESLA_P4 - aip.AcceleratorType.NVIDIA_TESLA_T4 - aip.AcceleratorType.NVIDIA_TESLA_V100Otherwise specify `(None, None)` to use a container image to run on a CPU.*Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. This is a known issue, caused by static graph ops that are generated in the serving function, and it is fixed in TF 2.3. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support.
###Code
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
###Output
_____no_output_____
###Markdown
Container (Docker) imageNext, we will set the Docker container images for training and prediction - TensorFlow 1.15 - `gcr.io/cloud-aiplatform/training/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.1-15:latest` - TensorFlow 2.1 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-1:latest` - TensorFlow 2.2 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-2:latest` - TensorFlow 2.3 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-3:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-3:latest` - TensorFlow 2.4 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-4:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-4:latest` - XGBoost - `gcr.io/cloud-aiplatform/training/xgboost-cpu.1-1` - Scikit-learn - `gcr.io/cloud-aiplatform/training/scikit-learn-cpu.0-23:latest` - Pytorch - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-4:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-5:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-6:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-7:latest`For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers). - TensorFlow 1.15 - `gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest` - TensorFlow 2.1 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest` - TensorFlow 2.2 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest` - TensorFlow 2.3 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-3:latest` - XGBoost - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-2:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-90:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-82:latest` - Scikit-learn - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest` - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-22:latest` - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-20:latest`For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers)
###Code
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
###Output
_____no_output_____
###Markdown
Machine TypeNext, set the machine type to use for training and prediction.- Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for training and prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of vCPUs [2, 4, 8, 16, 32, 64, 96]*Note: The following is not supported for training:* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
###Code
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
###Output
_____no_output_____
###Markdown
TutorialNow you are ready to start creating your own custom model and training for CIFAR10. Set up clientsThe AI Platform (Unified) client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the AI Platform (Unified) server.You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.- Model Service for `Model` resources.- Endpoint Service for deployment.- Job Service for batch jobs and custom training.- Prediction Service for serving.
###Code
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_job_client():
client = aip.JobServiceClient(client_options=client_options)
return client
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["job"] = create_job_client()
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
###Output
_____no_output_____
###Markdown
Train a modelThere are two ways you can train a custom model using a container image:- **Use a Google Cloud prebuilt container**. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model.- **Use your own custom container image**. If you use your own container, the container needs to contain your code for training a custom model. Prepare your custom job specificationNow that your clients are ready, your first step is to create a Job Specification for your custom training job. The job specification will consist of the following:- `worker_pool_spec` : The specification of the type of machine(s) you will use for training and how many (single or distributed)- `python_package_spec` : The specification of the Python package to be installed with the pre-built container. Prepare your machine specificationNow define the machine specification for your custom training job. This tells AI Platform (Unified) what type of machine instance to provision for the training. - `machine_type`: The type of GCP instance to provision -- e.g., n1-standard-8. - `accelerator_type`: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable `TRAIN_GPU != None`, you are using a GPU; otherwise you will use a CPU. - `accelerator_count`: The number of accelerators.
###Code
if TRAIN_GPU:
machine_spec = {
"machine_type": TRAIN_COMPUTE,
"accelerator_type": TRAIN_GPU,
"accelerator_count": TRAIN_NGPU,
}
else:
machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0}
###Output
_____no_output_____
###Markdown
Prepare your disk specification(optional) Now define the disk specification for your custom training job. This tells AI Platform (Unified) what type and size of disk to provision in each machine instance for the training. - `boot_disk_type`: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD. - `boot_disk_size_gb`: Size of disk in GB.
###Code
DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard]
DISK_SIZE = 200 # GB
disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE}
###Output
_____no_output_____
###Markdown
Define the worker pool specificationNext, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following:- `replica_count`: The number of instances to provision of this machine type.- `machine_spec`: The hardware specification.- `disk_spec` : (optional) The disk storage specification.- `python_package`: The Python training package to install on the VM instance(s) and which Python module to invoke, along with command line arguments for the Python module.Let's now dive deeper into the Python package specification:-`executor_image_spec`: This is the Docker image which is configured for your custom training job.-`package_uris`: This is a list of the locations (URIs) of your Python training packages to install on the provisioned instance. The locations need to be in a Cloud Storage bucket. These can be either individual Python files or a zip (archive) of an entire package. In the latter case, the job service will unzip (unarchive) the contents into the Docker image.-`python_module`: The Python module (script) to invoke for running the custom training job. In this example, you will be invoking `trainer.task` -- note that the `.py` suffix is not appended.-`args`: The command line arguments to pass to the corresponding Python module. In this example, you will be setting: - `"--model-dir=" + MODEL_DIR` : The Cloud Storage location where to store the model artifacts. There are two ways to tell the training script where to save the model artifacts: - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification. - `"--epochs=" + EPOCHS`: The number of epochs for training. - `"--steps=" + STEPS`: The number of steps (batches) per epoch. - `"--distribute=" + TRAIN_STRATEGY` : The training distribution strategy to use for single or distributed training. - `"single"`: single device. - `"mirror"`: all GPU devices on a single compute instance. - `"multi"`: all GPU devices on all compute instances.
###Code
JOB_NAME = "custom_job_" + TIMESTAMP
MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME)
if not TRAIN_NGPU or TRAIN_NGPU < 2:
TRAIN_STRATEGY = "single"
else:
TRAIN_STRATEGY = "mirror"
EPOCHS = 20
STEPS = 100
DIRECT = True
if DIRECT:
CMDARGS = [
"--model-dir=" + MODEL_DIR,
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
]
else:
CMDARGS = [
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
]
worker_pool_spec = [
{
"replica_count": 1,
"machine_spec": machine_spec,
"disk_spec": disk_spec,
"python_package_spec": {
"executor_image_uri": TRAIN_IMAGE,
"package_uris": [BUCKET_NAME + "/trainer_cifar10.tar.gz"],
"python_module": "trainer.task",
"args": CMDARGS,
},
}
]
###Output
_____no_output_____
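###Markdown
Since the specification is built from plain Python dictionaries and lists, you can optionally pretty-print it to verify the machine, disk, and Python package settings before submitting the job:
###Code
import json

# Inspect the assembled specification (default=str guards against any
# non-JSON-serializable values, such as enum types).
print(json.dumps(worker_pool_spec, indent=2, default=str))
###Output
_____no_output_____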
###Markdown
Assemble a job specificationNow assemble the complete description for the custom job specification:- `display_name`: The human readable name you assign to this custom job.- `job_spec`: The specification for the custom job. - `worker_pool_specs`: The specification for the machine VM instances. - `base_output_directory`: This tells the service the Cloud Storage location where to save the model artifacts (when variable `DIRECT = False`). The service will then pass the location to the training script as the environment variable `AIP_MODEL_DIR`, and the path will be of the form: /model
###Code
if DIRECT:
job_spec = {"worker_pool_specs": worker_pool_spec}
else:
job_spec = {
"worker_pool_specs": worker_pool_spec,
"base_output_directory": {"output_uri_prefix": MODEL_DIR},
}
custom_job = {"display_name": JOB_NAME, "job_spec": job_spec}
###Output
_____no_output_____
###Markdown
Examine the training package Package layoutBefore you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.- PKG-INFO- README.md- setup.cfg- setup.py- trainer - \_\_init\_\_.py - task.pyThe files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replaced the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`). Package AssemblyIn the following cells, you will assemble the training package.
###Code
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: AI Platform (Unified)"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
###Output
_____no_output_____
###Markdown
Task.py contentsIn the next cell, you write the contents of the training script task.py. We won't go into detail, it's just there for you to browse. In summary:- Gets the directory in which to save the model artifacts from the command line (`--model-dir`), and if not specified, from the environment variable `AIP_MODEL_DIR`.- Loads the CIFAR10 dataset from TF Datasets (tfds).- Builds a model using the TF.Keras model API.- Compiles the model (`compile()`).- Sets a training distribution strategy according to the argument `args.distribute`.- Trains the model (`fit()`) with epochs and steps according to the arguments `args.epochs` and `args.steps`.- Saves the trained model (`save(args.model_dir)`) to the specified model directory.
###Code
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for CIFAR-10
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv("AIP_MODEL_DIR"), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.01, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=10, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=200, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
print('DEVICES', device_lib.list_local_devices())
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
# Preparing dataset
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def make_datasets_unbatched():
# Scaling CIFAR10 pixel data from [0, 255] to [0, 1]
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255.0
return image, label
datasets, info = tfds.load(name='cifar10',
with_info=True,
as_supervised=True)
return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat()
# Build the Keras model
def build_and_compile_cnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(32, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
loss=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),
metrics=['accuracy'])
return model
# Train the model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_cnn_model()
model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)
model.save(args.model_dir)
###Output
_____no_output_____
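###Markdown
With `task.py` written, you can verify that the assembled folder now matches the package layout described earlier:
###Code
# List every file in the training package.
! find custom -type f | sort
###Output
_____no_output_____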
###Markdown
Store training script on your Cloud Storage bucketNext, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
###Code
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_cifar10.tar.gz
###Output
_____no_output_____
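###Markdown
Optionally, confirm that the package landed in your bucket:
###Code
! gsutil ls -l $BUCKET_NAME/trainer_cifar10.tar.gz
###Output
_____no_output_____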
###Markdown
Train the modelNow start the training of your custom training job on AI Platform (Unified). Use this helper function `create_custom_job`, which takes the following parameter:-`custom_job`: The specification for the custom job.The helper function calls the job client service's `create_custom_job` method, with the following parameters:-`parent`: The AI Platform (Unified) location path to `Dataset`, `Model` and `Endpoint` resources.-`custom_job`: The specification for the custom job.You will display a handful of the fields returned in the `response` object, the two of most interest being:`response.name`: The AI Platform (Unified) fully qualified identifier assigned to this custom training job. You save this identifier for use in subsequent steps.`response.state`: The current state of the custom training job.
###Code
def create_custom_job(custom_job):
response = clients["job"].create_custom_job(parent=PARENT, custom_job=custom_job)
print("name:", response.name)
print("display_name:", response.display_name)
print("state:", response.state)
print("create_time:", response.create_time)
print("update_time:", response.update_time)
return response
response = create_custom_job(custom_job)
###Output
_____no_output_____
###Markdown
Now get the unique identifier for the custom job you created.
###Code
# The full unique ID for the custom job
job_id = response.name
# The short numeric ID for the custom job
job_short_id = job_id.split("/")[-1]
print(job_id)
###Output
_____no_output_____
###Markdown
Get information on a custom jobNext, use this helper function `get_custom_job`, which takes the following parameter:- `name`: The AI Platform (Unified) fully qualified identifier for the custom job.The helper function calls the job client service's`get_custom_job` method, with the following parameter:- `name`: The AI Platform (Unified) fully qualified identifier for the custom job.If you recall, you got the AI Platform (Unified) fully qualified identifier for the custom job in the `response.name` field when you called the `create_custom_job` method, and saved the identifier in the variable `job_id`.
###Code
def get_custom_job(name, silent=False):
response = clients["job"].get_custom_job(name=name)
if silent:
return response
print("name:", response.name)
print("display_name:", response.display_name)
print("state:", response.state)
print("create_time:", response.create_time)
print("update_time:", response.update_time)
return response
response = get_custom_job(job_id)
###Output
_____no_output_____
###Markdown
DeploymentTraining the above model may take upwards of 20 minutes.Once your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`.For your model, you will need to know the location of the saved model, which the Python script saved in your Cloud Storage bucket at `MODEL_DIR + '/saved_model.pb'`.
###Code
while True:
response = get_custom_job(job_id, True)
if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_path_to_deploy = None
if response.state == aip.JobState.JOB_STATE_FAILED:
break
else:
if not DIRECT:
MODEL_DIR = MODEL_DIR + "/model"
model_path_to_deploy = MODEL_DIR
print("Training Time:", response.update_time - response.create_time)
break
time.sleep(60)
print("model_to_deploy:", model_path_to_deploy)
###Output
_____no_output_____
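###Markdown
The polling loop above approximates the training duration as `update_time - create_time`. The custom job resource also carries explicit `start_time` and `end_time` fields; a small sketch computing the duration from those, assuming the job has finished:
###Code
final = get_custom_job(job_id, silent=True)
# start_time and end_time are populated once the job has actually run.
if final.start_time and final.end_time:
    print("Elapsed training time:", final.end_time - final.start_time)
###Output
_____no_output_____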
###Markdown
Load the saved modelYour model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can work with it -- for example, evaluate the model and make a prediction.To load, you use the TF.Keras `tf.keras.models.load_model()` method, passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`.
###Code
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_DIR)
###Output
_____no_output_____
###Markdown
Evaluate the modelNow find out how good the model is. Load evaluation dataYou will load the CIFAR10 test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This returns the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the image data and the corresponding labels.You don't need the training data, which is why you load it as `(_, _)`.Before you can run the data through evaluation, you need to preprocess it:x_test: Normalize (rescale) the pixel data by dividing each pixel by 255. This replaces each single-byte integer pixel with a 32-bit floating point number between 0 and 1.y_test: The labels are currently scalar (sparse). If you look back at the `compile()` step in the `trainer/task.py` script, you will find that it was compiled for sparse labels, so nothing more needs to be done.
###Code
import numpy as np
from tensorflow.keras.datasets import cifar10
(_, _), (x_test, y_test) = cifar10.load_data()
x_test = (x_test / 255.0).astype(np.float32)
print(x_test.shape, y_test.shape)
###Output
_____no_output_____
###Markdown
Perform the model evaluationNow evaluate how well the model in the custom job did.
###Code
model.evaluate(x_test, y_test)
###Output
_____no_output_____
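###Markdown
`evaluate()` returns the loss followed by the metrics specified at compile time (here, accuracy). A small sketch capturing them explicitly:
###Code
loss, accuracy = model.evaluate(x_test, y_test, verbose=0)
print("test loss: {:.4f}, test accuracy: {:.4f}".format(loss, accuracy))
###Output
_____no_output_____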
###Markdown
Upload the model for servingNext, you will upload your TF.Keras model from the custom job to AI Platform (Unified) `Model` service, which will create a AI Platform (Unified) `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to AI Platform (Unified), your serving function ensures that the data is decoded on the model server before it is passed as input to your model. How does the serving function workWhen you send a request to an online prediction server, the request is received by an HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`.The serving function consists of two parts:- `preprocessing function`: - Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph). - Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc.- `post-processing function`: - Converts the model output to the format expected by the receiving application -- e.g., compresses the output. - Packages the output for the receiving application -- e.g., add headings, make JSON object, etc.Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.One thing to keep in mind when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported. Serving function for image dataTo pass images to the prediction service, you encode the compressed (e.g., JPEG) image bytes into base 64 -- which makes the content safe from modification while transmitting binary data over the network. Since this deployed model expects input data as raw (uncompressed) bytes, you need to ensure that the base 64 encoded data gets converted back to raw bytes before it is passed as input to the deployed model.To resolve this, define a serving function (`serving_fn`) and attach it to the model as a preprocessing step. Add a `@tf.function` decorator so the serving function is fused to the underlying model (instead of upstream on a CPU).When you send a prediction or explanation request, the content of the request is base 64 decoded into a Tensorflow string (`tf.string`), which is passed to the serving function (`serving_fn`). 
The serving function preprocesses the `tf.string` into raw (uncompressed) numpy bytes (`preprocess_fn`) to match the input requirements of the model:- `io.decode_jpeg`- Decompresses the JPG image, which is returned as a TensorFlow tensor with three channels (RGB).- `image.convert_image_dtype` - Converts the integer pixel values to 32-bit floats and rescales them to the [0, 1] range, so no further division by 255 is needed.- `image.resize` - Resizes the image to match the input shape for the model.At this point, the data can be passed to the model (`m_call`).
###Code
CONCRETE_INPUT = "numpy_inputs"
def _preprocess(bytes_input):
    decoded = tf.io.decode_jpeg(bytes_input, channels=3)
    # convert_image_dtype rescales uint8 pixels to floats in [0, 1], so a
    # further division by 255 would double-scale the input.
    decoded = tf.image.convert_image_dtype(decoded, tf.float32)
    resized = tf.image.resize(decoded, size=(32, 32))
    return resized
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
decoded_images = tf.map_fn(
_preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
)
return {
CONCRETE_INPUT: decoded_images
} # User needs to make sure the key matches model's input
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def serving_fn(bytes_inputs):
images = preprocess_fn(bytes_inputs)
prob = m_call(**images)
return prob
m_call = tf.function(model.call).get_concrete_function(
[tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)]
)
tf.saved_model.save(
model, model_path_to_deploy, signatures={"serving_default": serving_fn}
)
###Output
_____no_output_____
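###Markdown
Before uploading, you can exercise the serving function locally to confirm the JPEG preprocessing path produces a valid prediction. This sketch assumes the in-memory `model` and `serving_fn` from the cell above; it writes one test image to disk as a JPEG (as done again later in the prediction section) and calls the serving function directly with a batch of one raw-bytes string:
###Code
import cv2

# Write one test image to disk as a compressed JPEG, then read the raw bytes back.
cv2.imwrite("check.jpg", (x_test[0] * 255).astype(np.uint8))
raw = tf.io.read_file("check.jpg")

# Call the serving function with a batch of one JPEG byte string.
probs = serving_fn(tf.constant([raw.numpy()]))
print(probs.numpy().shape)  # expected: (1, 10)
###Output
_____no_output_____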
###Markdown
Get the serving function signatureYou can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.For your purpose, you need the signature of the serving function. Why? Well, when you send your data for prediction as an HTTP request packet, the image data is base64 encoded, and your TF.Keras model takes numpy input. Your serving function will do the conversion from base64 to a numpy array.When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
###Code
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
###Output
_____no_output_____
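###Markdown
You can also inspect the full structured signature, which shows that the serving function expects a batch of `tf.string` values and reveals the output tensor name:
###Code
sig = loaded.signatures["serving_default"]
print("inputs :", sig.structured_input_signature)
print("outputs:", sig.structured_outputs)
###Output
_____no_output_____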
###Markdown
Upload the modelUse this helper function `upload_model` to upload your model, stored in SavedModel format, to the `Model` service, which will instantiate an AI Platform (Unified) `Model` resource instance for your model. Once you've done that, you can use the `Model` resource instance in the same way as any other AI Platform (Unified) `Model` resource instance, such as deploying to an `Endpoint` resource for serving predictions.The helper function takes the following parameters:- `display_name`: A human readable name for the `Endpoint` service.- `image_uri`: The container image for the model deployment.- `model_uri`: The Cloud Storage path to our SavedModel artifact. For this tutorial, this is the Cloud Storage location where the `trainer/task.py` saved the model artifacts, which we specified in the variable `MODEL_DIR`.The helper function calls the `Model` client service's method `upload_model`, which takes the following parameters:- `parent`: The AI Platform (Unified) location root path for `Dataset`, `Model` and `Endpoint` resources.- `model`: The specification for the AI Platform (Unified) `Model` resource instance.Let's now dive deeper into the AI Platform (Unified) model specification `model`. This is a dictionary object that consists of the following fields:- `display_name`: A human readable name for the `Model` resource.- `metadata_schema_uri`: Since your model was built without an AI Platform (Unified) `Dataset` resource, you will leave this blank (`''`).- `artifact_uri`: The Cloud Storage path where the model is stored in SavedModel format.- `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the `Model` resource will serve predictions. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.Uploading a model into an AI Platform (Unified) `Model` resource returns a long running operation, since it may take a few moments. You call `response.result()`, which is a synchronous call and will return when the AI Platform (Unified) `Model` resource is ready.The helper function returns the AI Platform (Unified) fully qualified identifier for the corresponding AI Platform (Unified) `Model` instance, `upload_model_response.model`. You will save the identifier for subsequent steps in the variable `model_to_deploy_id`.
###Code
IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
model = {
"display_name": display_name,
"metadata_schema_uri": "",
"artifact_uri": model_uri,
"container_spec": {
"image_uri": image_uri,
"command": [],
"args": [],
"env": [{"name": "env_name", "value": "env_value"}],
"ports": [{"container_port": 8080}],
"predict_route": "",
"health_route": "",
},
}
response = clients["model"].upload_model(parent=PARENT, model=model)
print("Long running operation:", response.operation.name)
upload_model_response = response.result(timeout=180)
print("upload_model_response")
print(" model:", upload_model_response.model)
return upload_model_response.model
model_to_deploy_id = upload_model(
"cifar10-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)
###Output
_____no_output_____
###Markdown
Get `Model` resource informationNow let's get the model information for just your model. Use this helper function `get_model`, with the following parameter:- `name`: The AI Platform (Unified) unique identifier for the `Model` resource.This helper function calls the AI Platform (Unified) `Model` client service's method `get_model`, with the following parameter:- `name`: The AI Platform (Unified) unique identifier for the `Model` resource.
###Code
def get_model(name):
response = clients["model"].get_model(name=name)
print(response)
get_model(model_to_deploy_id)
###Output
_____no_output_____
###Markdown
Deploy the `Model` resourceNow deploy the trained AI Platform (Unified) custom `Model` resource. This requires two steps:1. Create an `Endpoint` resource for deploying the `Model` resource to.2. Deploy the `Model` resource to the `Endpoint` resource. Create an `Endpoint` resourceUse this helper function `create_endpoint` to create an endpoint to deploy the model to for serving predictions, with the following parameter:- `display_name`: A human readable name for the `Endpoint` resource.The helper function uses the endpoint client service's `create_endpoint` method, which takes the following parameter:- `display_name`: A human readable name for the `Endpoint` resource.Creating an `Endpoint` resource returns a long running operation, since it may take a few moments to provision the `Endpoint` resource for serving. You call `response.result()`, which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the AI Platform (Unified) fully qualified identifier for the `Endpoint` resource: `response.name`.
###Code
ENDPOINT_NAME = "cifar10_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
endpoint = {"display_name": display_name}
response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
print("Long running operation:", response.operation.name)
result = response.result(timeout=300)
print("result")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" description:", result.description)
print(" labels:", result.labels)
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
return result
result = create_endpoint(ENDPOINT_NAME)
###Output
_____no_output_____
###Markdown
Now get the unique identifier for the `Endpoint` resource you created.
###Code
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
###Output
_____no_output_____
###Markdown
Compute instance scalingYou have several choices on scaling the compute instances for handling your online prediction requests:- Single Instance: The online prediction requests are processed on a single compute instance. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one.- Manual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them.- Auto Scaling: The online prediction requests are split across a scalable number of compute instances. - Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision down to, and set the maximum (`MAX_NODES`) number of compute instances to provision, depending on load conditions.The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request.
###Code
MIN_NODES = 1
MAX_NODES = 1
###Output
_____no_output_____
###Markdown
Deploy `Model` resource to the `Endpoint` resourceUse this helper function `deploy_model` to deploy the `Model` resource to the `Endpoint` resource you created for serving predictions, with the following parameters:- `model`: The AI Platform (Unified) fully qualified model identifier of the model to upload (deploy) from the training pipeline.- `deploy_model_display_name`: A human readable name for the deployed model.- `endpoint`: The AI Platform (Unified) fully qualified endpoint identifier to deploy the model to.The helper function calls the `Endpoint` client service's method `deploy_model`, which takes the following parameters:- `endpoint`: The AI Platform (Unified) fully qualified `Endpoint` resource identifier to deploy the `Model` resource to.- `deployed_model`: The requirements specification for deploying the model.- `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs. - If only one model, then specify as **{ "0": 100 }**, where "0" refers to this model being uploaded and 100 means 100% of the traffic. - If there are existing models on the endpoint, for which the traffic will be split, then use `model_id` to specify as **{ "0": percent, model_id: percent, ... }**, where `model_id` is the model id of an existing model already deployed to the endpoint. The percents must add up to 100.Let's now dive deeper into the `deployed_model` parameter. This parameter is specified as a Python dictionary with the minimum required fields:- `model`: The AI Platform (Unified) fully qualified model identifier of the (upload) model to deploy.- `display_name`: A human readable name for the deployed model.- `disable_container_logging`: This disables logging of container events, such as execution failures (by default, container logging is enabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production.- `dedicated_resources`: This refers to how many compute instances (replicas) are scaled for serving prediction requests. - `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated. - `min_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`. - `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`. Traffic SplitLet's now dive deeper into the `traffic_split` parameter. This parameter is specified as a Python dictionary. This might at first be a bit confusing. Let me explain: you can deploy more than one instance of your model to an endpoint, and then set how much (percent) goes to each instance.Why would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy to production. So in the case of traffic split, you might want to deploy v2 to the same endpoint as v1, but it only gets, say, 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision. ResponseThe method returns a long running operation `response`. You will wait synchronously for the operation to complete by calling `response.result()`, which will block until the model is deployed. 
If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources.
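A purely illustrative example of such a split, where the non-zero model ID is a hypothetical placeholder:
###Code
# Hypothetical example only: route 10% of traffic to the model being deployed
# (key "0") and keep 90% on an already-deployed model with ID "1234567890".
example_traffic_split = {"0": 10, "1234567890": 90}
###Output
_____no_output_____
###Markdown
Now define the deployment helper function: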
###Code
DEPLOYED_NAME = "cifar10_deployed-" + TIMESTAMP
def deploy_model(
model, deployed_model_display_name, endpoint, traffic_split={"0": 100}
):
if DEPLOY_GPU:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_type": DEPLOY_GPU,
"accelerator_count": DEPLOY_NGPU,
}
else:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_count": 0,
}
deployed_model = {
"model": model,
"display_name": deployed_model_display_name,
"dedicated_resources": {
"min_replica_count": MIN_NODES,
"max_replica_count": MAX_NODES,
"machine_spec": machine_spec,
},
"disable_container_logging": False,
}
response = clients["endpoint"].deploy_model(
endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
)
print("Long running operation:", response.operation.name)
result = response.result()
print("result")
deployed_model = result.deployed_model
print(" deployed_model")
print(" id:", deployed_model.id)
print(" model:", deployed_model.model)
print(" display_name:", deployed_model.display_name)
print(" create_time:", deployed_model.create_time)
return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
###Output
_____no_output_____
###Markdown
Make an online prediction requestNow send an online prediction request to your deployed model. Get test itemYou will use an example out of the test (holdout) portion of the dataset as the test item.
###Code
test_image = x_test[0]
test_label = y_test[0]
print(test_image.shape)
###Output
_____no_output_____
###Markdown
Prepare the request contentYou are going to send the CIFAR10 image as a compressed JPEG image, instead of the raw uncompressed bytes:- `cv2.imwrite`: Use OpenCV to write the uncompressed image to disk as a compressed JPEG image. - Denormalize the image data from the [0, 1) range back to [0, 255). - Convert the 32-bit floating point values to 8-bit unsigned integers.- `tf.io.read_file`: Read the compressed JPG images back into memory as raw bytes.- `base64.b64encode`: Encode the raw bytes into a base 64 encoded string.
###Code
import base64
import cv2
cv2.imwrite("tmp.jpg", (test_image * 255).astype(np.uint8))
bytes = tf.io.read_file("tmp.jpg")
b64str = base64.b64encode(bytes.numpy()).decode("utf-8")
###Output
_____no_output_____
###Markdown
Send the prediction requestOK, now you have a test image. Use this helper function `predict_image`, which takes the following parameters:- `image`: The test image data as a numpy array.- `endpoint`: The AI Platform (Unified) fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed to.- `parameters_dict`: Additional parameters for serving.This function calls the prediction client service `predict` method with the following parameters:- `endpoint`: The AI Platform (Unified) fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed to.- `instances`: A list of instances (encoded images) to predict.- `parameters`: Additional parameters for serving.To pass the image data to the prediction service, in the previous step you encoded the bytes into base64 -- which makes the content safe from modification when transmitting binary data over the network. You need to tell the serving binary (where your model is deployed) that the content has been base64 encoded, so it will decode it on the other end.Each instance in the prediction request is a dictionary entry of the form: {serving_input: {'b64': content}}- `serving_input`: the name of the input layer of the underlying model.- `'b64'`: A key that indicates the content is base64 encoded.- `content`: The compressed JPEG image bytes as a base64 encoded string.Since the `predict()` service can take multiple images (instances), you will send your single image as a list of one image. As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the `predict()` service.The `response` object returns a list, where each element corresponds to an image in the request. You will see in the output for each prediction:- `predictions`: Confidence level for the prediction, between 0 and 1, for each of the classes.
###Code
def predict_image(image, endpoint, parameters_dict):
# The format of each instance should conform to the deployed model's prediction input schema.
instances_list = [{serving_input: {"b64": image}}]
instances = [json_format.ParseDict(s, Value()) for s in instances_list]
response = clients["prediction"].predict(
endpoint=endpoint, instances=instances, parameters=parameters_dict
)
print("response")
print(" deployed_model_id:", response.deployed_model_id)
predictions = response.predictions
print("predictions")
for prediction in predictions:
print(" prediction:", prediction)
predict_image(b64str, endpoint_id, None)
###Output
_____no_output_____
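###Markdown
Each prediction is a list of ten confidence scores, one per CIFAR10 class. As a sketch of how you might post-process a result (the `prediction` values below are made up, and the class ordering is assumed to follow the CIFAR10 label order: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck), you can map the highest score back to a label:
###Code
import numpy as np

# Hypothetical post-processing of one prediction row: pick the class with
# the highest confidence and look up its CIFAR10 label.
CLASSES = ["airplane", "automobile", "bird", "cat", "deer",
           "dog", "frog", "horse", "ship", "truck"]
prediction = [0.01, 0.02, 0.05, 0.60, 0.05, 0.10, 0.05, 0.02, 0.05, 0.05]  # example values
print("predicted class:", CLASSES[int(np.argmax(prediction))])
###Output
_____no_output_____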
###Markdown
Undeploy the `Model` resourceNow undeploy your `Model` resource from the serving `Endpoint` resource. Use this helper function `undeploy_model`, which takes the following parameters:- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed.- `endpoint`: The AI Platform (Unified) fully qualified identifier for the `Endpoint` resource where the `Model` resource is deployed.This function calls the endpoint client service's method `undeploy_model`, with the following parameters:- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed.- `endpoint`: The AI Platform (Unified) fully qualified identifier for the `Endpoint` resource where the `Model` resource is deployed.- `traffic_split`: How to split traffic among the remaining deployed models on the `Endpoint` resource.Since this is the only deployed model on the `Endpoint` resource, you can simply leave `traffic_split` empty by setting it to {}; the multi-model case is sketched below.
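As a sketch (the id `4321` is a made-up deployed-model id), if two models shared the endpoint and you undeployed one of them, you would hand the remaining model all of the traffic:
###Code
# Hypothetical: after undeploying one of two models, route 100% of the
# traffic to the remaining deployed model. "4321" is a placeholder id.
remaining_traffic_split = {"4321": 100}
###Output
_____no_output_____
###Markdown
With a single deployed model, as in this tutorial, the empty dictionary is all you need.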
###Code
def undeploy_model(deployed_model_id, endpoint):
response = clients["endpoint"].undeploy_model(
endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}
)
print(response)
undeploy_model(deployed_model_id, endpoint_id)
###Output
_____no_output_____
###Markdown
Cleaning upTo clean up all GCP resources used in this project, you can [delete the GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Dataset- Pipeline- Model- Endpoint- Batch Job- Custom Job- Hyperparameter Tuning Job- Cloud Storage Bucket
###Code
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the AI Platform (Unified) fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the AI Platform (Unified) fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the AI Platform (Unified) fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the AI Platform (Unified) fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the AI Platform (Unified) fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the AI Platform (Unified) fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the AI Platform (Unified) fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Vertex AI client library: Custom training image classification model for online prediction OverviewThis tutorial demonstrates how to use the Vertex AI Python client library to train and deploy a custom image classification model for online prediction. DatasetThe dataset used for this tutorial is the [CIFAR10 dataset](https://www.tensorflow.org/datasets/catalog/cifar10) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use is built into TensorFlow. The trained model predicts which of ten classes an image belongs to: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck. ObjectiveIn this tutorial, you create a custom model from a Python script in a Google prebuilt Docker container using the Vertex AI client library, and then do a prediction on the deployed model by sending data. You can alternatively create custom models using the `gcloud` command-line tool or online using the Google Cloud Console.The steps performed include:- Create a Vertex AI custom job for training a model.- Train a TensorFlow model.- Retrieve and load the model artifacts.- View the model evaluation.- Upload the model as a Vertex AI `Model` resource.- Deploy the `Model` resource to a serving `Endpoint` resource.- Make a prediction.- Undeploy the `Model` resource. CostsThis tutorial uses billable components of Google Cloud (GCP):* Vertex AI* Cloud StorageLearn about [Vertex AI pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. InstallationInstall the latest version of the Vertex AI client library.
###Code
import sys
if "google.colab" in sys.modules:
USER_FLAG = ""
else:
USER_FLAG = "--user"
! pip3 install -U google-cloud-aiplatform $USER_FLAG
###Output
_____no_output_____
###Markdown
Install the latest GA version of the *google-cloud-storage* library as well.
###Code
! pip3 install -U google-cloud-storage $USER_FLAG
###Output
_____no_output_____
###Markdown
Restart the kernelOnce you've installed the Vertex AI client library and *google-cloud-storage*, you need to restart the notebook kernel so it can find the packages.
###Code
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Before you begin GPU runtime*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)3. [Enable the Vertex AI APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Vertex AI Notebooks.5. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
###Code
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
###Output
_____no_output_____
###Markdown
RegionYou can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.- Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1`You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. For the latest support per region, see the [Vertex AI locations documentation](https://cloud.google.com/ai-platform-unified/docs/general/locations).
###Code
REGION = "us-central1" # @param {type: "string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of the resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Authenticate your Google Cloud account**If you are using Vertex AI Notebooks**, your environment is already authenticated. Skip this step.**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via OAuth.**Otherwise**, follow these steps:In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.Click **Create service account**.In the **Service account name** field, enter a name, and click **Create**.In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex AI" into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.Click **Create**. A JSON file that contains your key downloads to your local environment.Enter the path to your service account key as the `GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
###Code
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on AI Platform, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**When you submit a custom training job using the Vertex AI client library, you upload a Python package containing your training code to a Cloud Storage bucket. Vertex AI runs the code from this package. In this tutorial, Vertex AI also saves the trained model that results from your job in the same bucket. You can then create an `Endpoint` resource based on this output in order to serve online predictions.Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Set up variablesNext, set up some variables used throughout the tutorial. Import libraries and define constants Import Vertex AI client libraryImport the Vertex AI client library into our Python environment.
###Code
import os
import sys
import time
import google.cloud.aiplatform_v1 as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
###Output
_____no_output_____
###Markdown
Vertex AI constantsSet up the following constants for Vertex AI:- `API_ENDPOINT`: The Vertex AI API service endpoint for dataset, model, job, pipeline and endpoint services.- `PARENT`: The Vertex AI location root path for dataset, model, job, pipeline and endpoint resources.
###Code
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex AI location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
###Output
_____no_output_____
###Markdown
Hardware AcceleratorsSet the hardware accelerators (e.g., GPU), if any, for training and prediction.Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 NVIDIA Tesla K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)For GPU, available accelerators include: - aip.AcceleratorType.NVIDIA_TESLA_K80 - aip.AcceleratorType.NVIDIA_TESLA_P100 - aip.AcceleratorType.NVIDIA_TESLA_P4 - aip.AcceleratorType.NVIDIA_TESLA_T4 - aip.AcceleratorType.NVIDIA_TESLA_V100Otherwise specify `(None, None)` to use a container image that runs on a CPU.*Note*: TF releases before 2.3 with GPU support will fail to load the custom model in this tutorial. This is a known issue, caused by static graph ops that are generated in the serving function, and it is fixed in TF 2.3. If you encounter this issue with your own custom models, use a container image for TF 2.3 with GPU support.
###Code
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
###Output
_____no_output_____
###Markdown
Container (Docker) imageNext, we will set the Docker container images for training and prediction - TensorFlow 1.15 - `gcr.io/cloud-aiplatform/training/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.1-15:latest` - TensorFlow 2.1 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-1:latest` - TensorFlow 2.2 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-2:latest` - TensorFlow 2.3 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-3:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-3:latest` - TensorFlow 2.4 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-4:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-4:latest` - XGBoost - `gcr.io/cloud-aiplatform/training/xgboost-cpu.1-1` - Scikit-learn - `gcr.io/cloud-aiplatform/training/scikit-learn-cpu.0-23:latest` - Pytorch - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-4:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-5:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-6:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-7:latest`For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers). - TensorFlow 1.15 - `gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest` - TensorFlow 2.1 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest` - TensorFlow 2.2 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest` - TensorFlow 2.3 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-3:latest` - XGBoost - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-2:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-90:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-82:latest` - Scikit-learn - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest` - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-22:latest` - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-20:latest`For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers)
###Code
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
###Output
_____no_output_____
###Markdown
Machine TypeNext, set the machine type to use for training and prediction.- Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for training and prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of vCPUs [2, 4, 8, 16, 32, 64, 96]*Note: The following is not supported for training:* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
###Code
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
###Output
_____no_output_____
###Markdown
TutorialNow you are ready to start creating and training your own custom model for CIFAR10. Set up clientsThe Vertex AI client library follows a client/server model. On your side (the Python script) you create a client that sends requests to and receives responses from the Vertex AI server.You will use different clients in this tutorial for different steps in the workflow, so set them all up upfront.- Model Service for `Model` resources.- Endpoint Service for deployment.- Job Service for batch jobs and custom training.- Prediction Service for serving.
###Code
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_job_client():
client = aip.JobServiceClient(client_options=client_options)
return client
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["job"] = create_job_client()
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
###Output
_____no_output_____
###Markdown
Train a modelThere are two ways you can train a custom model using a container image:- **Use a Google Cloud prebuilt container**. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model.- **Use your own custom container image**. If you use your own container, the container needs to contain your code for training a custom model. Prepare your custom job specificationNow that your clients are ready, your first step is to create a Job Specification for your custom training job. The job specification will consist of the following:- `worker_pool_spec`: The specification of the type of machine(s) you will use for training and how many (single or distributed).- `python_package_spec`: The specification of the Python package to be installed with the pre-built container. Prepare your machine specificationNow define the machine specification for your custom training job. This tells Vertex AI what type of machine instance to provision for the training. - `machine_type`: The type of GCP instance to provision -- e.g., n1-standard-8. - `accelerator_type`: The type, if any, of hardware accelerator. In this tutorial, if you previously set the variable `TRAIN_GPU != None`, you are using a GPU; otherwise you will use a CPU. - `accelerator_count`: The number of accelerators.
###Code
if TRAIN_GPU:
machine_spec = {
"machine_type": TRAIN_COMPUTE,
"accelerator_type": TRAIN_GPU,
"accelerator_count": TRAIN_NGPU,
}
else:
machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0}
###Output
_____no_output_____
###Markdown
Prepare your disk specification (optional)Now define the disk specification for your custom training job. This tells Vertex AI what type and size of disk to provision in each machine instance for the training. - `boot_disk_type`: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD. - `boot_disk_size_gb`: Size of disk in GB.
###Code
DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard]
DISK_SIZE = 200 # GB
disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE}
###Output
_____no_output_____
###Markdown
Define the worker pool specificationNext, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following:- `replica_count`: The number of instances to provision of this machine type.- `machine_spec`: The hardware specification.- `disk_spec`: (optional) The disk storage specification.- `python_package`: The Python training package to install on the VM instance(s) and which Python module to invoke, along with command line arguments for the Python module.Let's dive deeper now into the Python package specification:- `executor_image_uri`: The Docker image which is configured for your custom training job.- `package_uris`: A list of the locations (URIs) of your Python training packages to install on the provisioned instance. The locations need to be in a Cloud Storage bucket. These can be either individual Python files or a zip (archive) of an entire package. In the latter case, the job service will unzip (unarchive) the contents into the Docker image.- `python_module`: The Python module (script) to invoke for running the custom training job. In this example, you will be invoking `trainer.task` -- note that the `.py` file suffix is not appended to the module name.- `args`: The command line arguments to pass to the corresponding Python module. In this example, you will be setting: - `"--model-dir=" + MODEL_DIR` : The Cloud Storage location where to store the model artifacts. There are two ways to tell the training script where to save the model artifacts: - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification. - `"--epochs=" + EPOCHS`: The number of epochs for training. - `"--steps=" + STEPS`: The number of steps (batches) per epoch. - `"--distribute=" + TRAIN_STRATEGY` : The training distribution strategy to use for single or distributed training. - `"single"`: single device. - `"mirror"`: all GPU devices on a single compute instance. - `"multi"`: all GPU devices on all compute instances.
###Code
JOB_NAME = "custom_job_" + TIMESTAMP
MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME)
if not TRAIN_NGPU or TRAIN_NGPU < 2:
TRAIN_STRATEGY = "single"
else:
TRAIN_STRATEGY = "mirror"
EPOCHS = 20
STEPS = 100
DIRECT = True
if DIRECT:
CMDARGS = [
"--model-dir=" + MODEL_DIR,
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
]
else:
CMDARGS = [
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
]
worker_pool_spec = [
{
"replica_count": 1,
"machine_spec": machine_spec,
"disk_spec": disk_spec,
"python_package_spec": {
"executor_image_uri": TRAIN_IMAGE,
"package_uris": [BUCKET_NAME + "/trainer_cifar10.tar.gz"],
"python_module": "trainer.task",
"args": CMDARGS,
},
}
]
###Output
_____no_output_____
###Markdown
Assemble a job specificationNow assemble the complete description for the custom job specification:- `display_name`: The human readable name you assign to this custom job.- `job_spec`: The specification for the custom job. - `worker_pool_specs`: The specification for the machine VM instances. - `base_output_directory`: This tells the service the Cloud Storage location where to save the model artifacts (when variable `DIRECT = False`). The service will then pass the location to the training script as the environment variable `AIP_MODEL_DIR`, and the path will be of the form `MODEL_DIR/model`.
###Code
if DIRECT:
job_spec = {"worker_pool_specs": worker_pool_spec}
else:
job_spec = {
"worker_pool_specs": worker_pool_spec,
"base_output_directory": {"output_uri_prefix": MODEL_DIR},
}
custom_job = {"display_name": JOB_NAME, "job_spec": job_spec}
###Output
_____no_output_____
###Markdown
Examine the training package Package layoutBefore you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.- PKG-INFO- README.md- setup.cfg- setup.py- trainer - \_\_init\_\_.py - task.pyThe files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.The file `trainer/task.py` is the Python script for executing the custom training job. *Note*: when we referred to it in the worker pool specification, we replaced the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`). Package AssemblyIn the following cells, you will assemble the training package.
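For reference, the `setup.py` that one of the next cells generates is written as an escaped one-line shell string; a readable rendering of the file it produces is shown here (as comments, so this cell is safe to run):
###Code
# Readable form of the generated custom/setup.py:
#
#   import setuptools
#
#   setuptools.setup(
#       install_requires=[
#           'tensorflow_datasets==1.3.0',
#       ],
#       packages=setuptools.find_packages())
###Output
_____no_output_____
###Markdown
It declares a single dependency, `tensorflow_datasets==1.3.0`, and lets setuptools discover the `trainer` package.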
###Code
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex AI"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
###Output
_____no_output_____
###Markdown
Task.py contentsIn the next cell, you write the contents of the training script task.py. We won't go into detail, it's just there for you to browse. In summary:- Gets the directory where to save the model artifacts from the command line (`--model-dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`.- Loads the CIFAR10 dataset from TF Datasets (tfds).- Builds a model using the TF.Keras model API.- Compiles the model (`compile()`).- Sets a training distribution strategy according to the argument `args.distribute`.- Trains the model (`fit()`) with epochs and steps according to the arguments `args.epochs` and `args.steps`.- Saves the trained model (`save(args.model_dir)`) to the specified model directory.
###Code
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for CIFAR-10
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv("AIP_MODEL_DIR"), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.01, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=10, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=200, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
print('DEVICES', device_lib.list_local_devices())
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
# Preparing dataset
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def make_datasets_unbatched():
    # Scale the CIFAR10 pixel values from [0, 255] to [0, 1]
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255.0
return image, label
datasets, info = tfds.load(name='cifar10',
with_info=True,
as_supervised=True)
return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat()
# Build the Keras model
def build_and_compile_cnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(32, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
loss=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),
metrics=['accuracy'])
return model
# Train the model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)
with strategy.scope():
    # Model building/compiling need to be within `strategy.scope()`.
model = build_and_compile_cnn_model()
model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)
model.save(args.model_dir)
###Output
_____no_output_____
###Markdown
Store training script on your Cloud Storage bucketNext, you package the training folder into a compressed tarball, and then store it in your Cloud Storage bucket.
###Code
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_cifar10.tar.gz
###Output
_____no_output_____
###Markdown
Train the modelNow start the training of your custom training job on Vertex AI. Use this helper function `create_custom_job`, which takes the following parameter:- `custom_job`: The specification for the custom job.The helper function calls the job client service's `create_custom_job` method, with the following parameters:- `parent`: The Vertex AI location path to `Dataset`, `Model` and `Endpoint` resources.- `custom_job`: The specification for the custom job.You will display a handful of the fields returned in the `response` object, the two of most interest being:`response.name`: The Vertex AI fully qualified identifier assigned to this custom training job. You save this identifier for use in subsequent steps.`response.state`: The current state of the custom training job.
###Code
def create_custom_job(custom_job):
response = clients["job"].create_custom_job(parent=PARENT, custom_job=custom_job)
print("name:", response.name)
print("display_name:", response.display_name)
print("state:", response.state)
print("create_time:", response.create_time)
print("update_time:", response.update_time)
return response
response = create_custom_job(custom_job)
###Output
_____no_output_____
###Markdown
Now get the unique identifier for the custom job you created.
###Code
# The full unique ID for the custom job
job_id = response.name
# The short numeric ID for the custom job
job_short_id = job_id.split("/")[-1]
print(job_id)
###Output
_____no_output_____
###Markdown
Get information on a custom jobNext, use this helper function `get_custom_job`, which takes the following parameter:- `name`: The Vertex AI fully qualified identifier for the custom job.The helper function calls the job client service's`get_custom_job` method, with the following parameter:- `name`: The Vertex AI fully qualified identifier for the custom job.If you recall, you got the Vertex AI fully qualified identifier for the custom job in the `response.name` field when you called the `create_custom_job` method, and saved the identifier in the variable `job_id`.
###Code
def get_custom_job(name, silent=False):
response = clients["job"].get_custom_job(name=name)
if silent:
return response
print("name:", response.name)
print("display_name:", response.display_name)
print("state:", response.state)
print("create_time:", response.create_time)
print("update_time:", response.update_time)
return response
response = get_custom_job(job_id)
###Output
_____no_output_____
###Markdown
DeploymentTraining the above model may take upwards of 20 minutes.Once your model is done training, you can calculate the actual time it took to train the model by subtracting `start_time` from `end_time`. For your model, we will need to know the location of the saved model, which the Python script saved in your Cloud Storage bucket at `MODEL_DIR + '/saved_model.pb'`.
###Code
# Poll the job state every 60 seconds until it either succeeds or fails.
while True:
    response = get_custom_job(job_id, True)
    if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
        print("Training job has not completed:", response.state)
        model_path_to_deploy = None
        if response.state == aip.JobState.JOB_STATE_FAILED:
            break
    else:
        # On success, resolve where the training script saved the model.
        if not DIRECT:
            MODEL_DIR = MODEL_DIR + "/model"
        model_path_to_deploy = MODEL_DIR
        print("Training Time:", response.update_time - response.create_time)
        break
    time.sleep(60)
print("model_to_deploy:", model_path_to_deploy)
###Output
_____no_output_____
###Markdown
Load the saved modelYour model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do things like evaluate the model and make a prediction.To load, you use the TF.Keras `model.load_model()` method, passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`.
###Code
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_DIR)
###Output
_____no_output_____
###Markdown
Evaluate the modelNow find out how good the model is. Load evaluation dataYou will load the CIFAR10 test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This will return the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the image data, and the corresponding labels.You don't need the training data, hence it is loaded as `(_, _)`.Before you can run the data through evaluation, you need to preprocess it:x_test: Normalize (rescale) the pixel data by dividing each pixel by 255. This will replace each single-byte integer pixel with a 32-bit floating point number between 0 and 1.y_test: The labels are currently scalar (sparse). If you look back at the `compile()` step in the `trainer/task.py` script, you will find that it was compiled for sparse labels, so we don't need to do anything more.
###Code
import numpy as np
from tensorflow.keras.datasets import cifar10
(_, _), (x_test, y_test) = cifar10.load_data()
x_test = (x_test / 255.0).astype(np.float32)
print(x_test.shape, y_test.shape)
###Output
_____no_output_____
###Markdown
Perform the model evaluationNow evaluate how well the model in the custom job did.
###Code
model.evaluate(x_test, y_test)
###Output
_____no_output_____
###Markdown
Upload the model for servingNext, you will upload your TF.Keras model from the custom job to the Vertex AI `Model` service, which will create a Vertex AI `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex AI, your serving function ensures that the data is decoded on the model server before it is passed as input to your model. How does the serving function workWhen you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`.The serving function consists of two parts:- `preprocessing function`: - Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph). - Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc.- `post-processing function`: - Converts the model output to the format expected by the receiving application -- e.g., compresses the output. - Packages the output for the receiving application -- e.g., add headings, make JSON object, etc.Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.One thing you need to consider when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported. Serving function for image dataTo pass images to the prediction service, you encode the compressed (e.g., JPEG) image bytes into base64 -- which makes the content safe from modification while transmitting binary data over the network. Since this deployed model expects input data as raw (uncompressed) bytes, you need to ensure that the base64 encoded data gets converted back to raw bytes before it is passed as input to the deployed model.To resolve this, define a serving function (`serving_fn`) and attach it to the model as a preprocessing step. Add a `@tf.function` decorator so the serving function is fused to the underlying model (instead of upstream on a CPU).When you send a prediction or explanation request, the content of the request is base64 decoded into a Tensorflow string (`tf.string`), which is passed to the serving function (`serving_fn`). The serving function preprocesses the `tf.string` into raw (uncompressed) numpy bytes (`preprocess_fn`) to match the input requirements of the model:- `io.decode_jpeg`- Decompresses the JPEG image which is returned as a Tensorflow tensor with three channels (RGB).- `image.convert_image_dtype` - Converts the integer pixel values to 32-bit floats and rescales (normalizes) them to the range [0, 1], matching the scaling used during training.- `image.resize` - Resizes the image to match the input shape for the model.At this point, the data can be passed to the model (`m_call`).
###Code
CONCRETE_INPUT = "numpy_inputs"
def _preprocess(bytes_input):
decoded = tf.io.decode_jpeg(bytes_input, channels=3)
decoded = tf.image.convert_image_dtype(decoded, tf.float32)
resized = tf.image.resize(decoded, size=(32, 32))
rescale = tf.cast(resized / 255.0, tf.float32)
return rescale
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
decoded_images = tf.map_fn(
_preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
)
return {
CONCRETE_INPUT: decoded_images
} # User needs to make sure the key matches model's input
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def serving_fn(bytes_inputs):
images = preprocess_fn(bytes_inputs)
prob = m_call(**images)
return prob
m_call = tf.function(model.call).get_concrete_function(
[tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)]
)
tf.saved_model.save(
model, model_path_to_deploy, signatures={"serving_default": serving_fn}
)
###Output
_____no_output_____
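###Markdown
As an optional sanity check (a sketch, not part of the original flow), you can reload the SavedModel and push one synthetic JPEG through the serving signature locally before deploying:
###Code
import numpy as np
import tensorflow as tf

# Build a fake 32x32 RGB image, compress it to JPEG bytes, and run it
# through the reloaded "serving_default" signature as a local smoke test.
fake_image = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
jpeg_bytes = tf.io.encode_jpeg(tf.constant(fake_image)).numpy()
reloaded = tf.saved_model.load(model_path_to_deploy)
serving = reloaded.signatures["serving_default"]
print(serving(bytes_inputs=tf.constant([jpeg_bytes])))
###Output
_____no_output_____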
###Markdown
Get the serving function signatureYou can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.For your purpose, you need the signature of the serving function. Why? Well, when we send our data for prediction as an HTTP request packet, the image data is base64 encoded, and our TF.Keras model takes numpy input. Your serving function will do the conversion from base64 to a numpy array.When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
###Code
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
###Output
_____no_output_____
###Markdown
Upload the modelUse this helper function `upload_model` to upload your model, stored in SavedModel format, up to the `Model` service, which will instantiate a Vertex AI `Model` resource instance for your model. Once you've done that, you can use the `Model` resource instance in the same way as any other Vertex AI `Model` resource instance, such as deploying to an `Endpoint` resource for serving predictions.The helper function takes the following parameters:- `display_name`: A human readable name for the `Endpoint` service.- `image_uri`: The container image for the model deployment.- `model_uri`: The Cloud Storage path to our SavedModel artifact. For this tutorial, this is the Cloud Storage location where the `trainer/task.py` saved the model artifacts, which we specified in the variable `MODEL_DIR`.The helper function calls the `Model` client service's method `upload_model`, which takes the following parameters:- `parent`: The Vertex AI location root path for `Dataset`, `Model` and `Endpoint` resources.- `model`: The specification for the Vertex AI `Model` resource instance.Let's now dive deeper into the Vertex AI model specification `model`. This is a dictionary object that consists of the following fields:- `display_name`: A human readable name for the `Model` resource.- `metadata_schema_uri`: Since your model was built without a Vertex AI `Dataset` resource, you will leave this blank (`''`).- `artifact_uri`: The Cloud Storage path where the model is stored in SavedModel format.- `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the `Model` resource will serve predictions. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.Uploading a model into a Vertex AI `Model` resource returns a long running operation, since it may take a few moments. You call `response.result()`, which is a synchronous call and will return when the Vertex AI `Model` resource is ready.The helper function returns the Vertex AI fully qualified identifier for the corresponding Vertex AI `Model` instance, `upload_model_response.model`. You will save the identifier for subsequent steps in the variable `model_to_deploy_id`.
###Code
IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
model = {
"display_name": display_name,
"metadata_schema_uri": "",
"artifact_uri": model_uri,
"container_spec": {
"image_uri": image_uri,
"command": [],
"args": [],
"env": [{"name": "env_name", "value": "env_value"}],
"ports": [{"container_port": 8080}],
"predict_route": "",
"health_route": "",
},
}
response = clients["model"].upload_model(parent=PARENT, model=model)
print("Long running operation:", response.operation.name)
upload_model_response = response.result(timeout=180)
print("upload_model_response")
print(" model:", upload_model_response.model)
return upload_model_response.model
model_to_deploy_id = upload_model(
"cifar10-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)
###Output
_____no_output_____
###Markdown
Get `Model` resource informationNow let's get the model information for just your model. Use this helper function `get_model`, with the following parameter:- `name`: The Vertex AI unique identifier for the `Model` resource.This helper function calls the Vertex AI `Model` client service's method `get_model`, with the following parameter:- `name`: The Vertex AI unique identifier for the `Model` resource.
###Code
def get_model(name):
response = clients["model"].get_model(name=name)
print(response)
get_model(model_to_deploy_id)
###Output
_____no_output_____
###Markdown
Deploy the `Model` resourceNow deploy the trained Vertex AI custom `Model` resource. This requires two steps:1. Create an `Endpoint` resource for deploying the `Model` resource to.2. Deploy the `Model` resource to the `Endpoint` resource. Create an `Endpoint` resourceUse this helper function `create_endpoint` to create an endpoint to deploy the model to for serving predictions, with the following parameter:- `display_name`: A human readable name for the `Endpoint` resource.The helper function uses the endpoint client service's `create_endpoint` method, which takes the following parameter:- `display_name`: A human readable name for the `Endpoint` resource.Creating an `Endpoint` resource returns a long running operation, since it may take a few moments to provision the `Endpoint` resource for serving. You call `response.result()`, which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the Vertex AI fully qualified identifier for the `Endpoint` resource: `response.name`.
###Code
ENDPOINT_NAME = "cifar10_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
endpoint = {"display_name": display_name}
response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
print("Long running operation:", response.operation.name)
result = response.result(timeout=300)
print("result")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" description:", result.description)
print(" labels:", result.labels)
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
return result
result = create_endpoint(ENDPOINT_NAME)
###Output
_____no_output_____
###Markdown
Now get the unique identifier for the `Endpoint` resource you created.
###Code
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
###Output
_____no_output_____
###Markdown
Compute instance scalingYou have several choices on scaling the compute instances for handling your online prediction requests:- Single Instance: The online prediction requests are processed on a single compute instance. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one.- Manual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them.- Auto Scaling: The online prediction requests are split across a scalable number of compute instances; a sketch of this case follows this cell. - Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to scale down to, and set the maximum (`MAX_NODES`) number of compute instances to scale up to, depending on load conditions.The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request.
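For example (a sketch of the auto-scaling case, not the configuration this tutorial uses), starting with one instance and allowing growth to three under load would be:
###Code
# Hypothetical auto-scaling configuration (not used below): provision one
# instance at deploy time and let the service scale out to at most three.
AUTO_SCALE_MIN_NODES = 1
AUTO_SCALE_MAX_NODES = 3
###Output
_____no_output_____
###Markdown
This tutorial uses a single instance: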
###Code
MIN_NODES = 1
MAX_NODES = 1
###Output
_____no_output_____
###Markdown
Deploy `Model` resource to the `Endpoint` resourceUse this helper function `deploy_model` to deploy the `Model` resource to the `Endpoint` resource you created for serving predictions, with the following parameters:- `model`: The Vertex AI fully qualified model identifier of the model to upload (deploy) from the training pipeline.- `deployed_model_display_name`: A human readable name for the deployed model.- `endpoint`: The Vertex AI fully qualified endpoint identifier to deploy the model to.The helper function calls the `Endpoint` client service's method `deploy_model`, which takes the following parameters:- `endpoint`: The Vertex AI fully qualified `Endpoint` resource identifier to deploy the `Model` resource to.- `deployed_model`: The requirements specification for deploying the model.- `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs. - If only one model, then specify as **{ "0": 100 }**, where "0" refers to this model being uploaded and 100 means 100% of the traffic. - If there are existing models on the endpoint, for which the traffic will be split, then use `model_id` to specify as **{ "0": percent, model_id: percent, ... }**, where `model_id` is the model id of an existing model deployed to the endpoint. The percents must add up to 100.Let's now dive deeper into the `deployed_model` parameter. This parameter is specified as a Python dictionary with the minimum required fields:- `model`: The Vertex AI fully qualified model identifier of the (upload) model to deploy.- `display_name`: A human readable name for the deployed model.- `disable_container_logging`: This disables logging of container events, such as execution failures (by default, container logging is enabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production.- `dedicated_resources`: This refers to how many compute instances (replicas) are provisioned for serving prediction requests. - `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated. - `min_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`. - `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`. Traffic SplitLet's now dive deeper into the `traffic_split` parameter. This parameter is specified as a Python dictionary. This might at first be a bit confusing. Let me explain: you can deploy more than one instance of your model to an endpoint, and then set how much (percent) of the traffic goes to each instance.Why would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy it to production. So in the case of traffic split, you might want to deploy v2 to the same endpoint as v1, but have it only get, say, 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision. ResponseThe method returns a long running operation `response`. We will wait synchronously for the operation to complete by calling `response.result()`, which will block until the model is deployed. 
If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources.
###Code
DEPLOYED_NAME = "cifar10_deployed-" + TIMESTAMP
def deploy_model(
model, deployed_model_display_name, endpoint, traffic_split={"0": 100}
):
if DEPLOY_GPU:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_type": DEPLOY_GPU,
"accelerator_count": DEPLOY_NGPU,
}
else:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_count": 0,
}
deployed_model = {
"model": model,
"display_name": deployed_model_display_name,
"dedicated_resources": {
"min_replica_count": MIN_NODES,
"max_replica_count": MAX_NODES,
"machine_spec": machine_spec,
},
"disable_container_logging": False,
}
response = clients["endpoint"].deploy_model(
endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
)
print("Long running operation:", response.operation.name)
result = response.result()
print("result")
deployed_model = result.deployed_model
print(" deployed_model")
print(" id:", deployed_model.id)
print(" model:", deployed_model.model)
print(" display_name:", deployed_model.display_name)
print(" create_time:", deployed_model.create_time)
return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
###Output
_____no_output_____
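###Markdown
 As a quick illustration of the `traffic_split` parameter (a minimal sketch, not executed against a live endpoint): the key "0" refers to the model being deployed in this request, and "1234567890" is a hypothetical id of a model already live on the endpoint.
###Code
# hedged sketch: send 10% of traffic to the new model ("0") and keep 90%
# on a hypothetical existing deployment ("1234567890")
canary_split = {"0": 10, "1234567890": 90}
assert sum(canary_split.values()) == 100  # the percents must add up to 100
###Output
_____no_output_____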
###Markdown
Make an online prediction requestNow do an online prediction to your deployed model. Get test itemYou will use an example out of the test (holdout) portion of the dataset as a test item.
###Code
test_image = x_test[0]
test_label = y_test[0]
print(test_image.shape)
###Output
_____no_output_____
###Markdown
Prepare the request contentYou are going to send the CIFAR10 image as a compressed JPG image, instead of the raw uncompressed bytes:- `cv2.imwrite`: Use OpenCV to write the uncompressed image to disk as a compressed JPEG image. - Denormalize the image data from the [0,1) range back to [0,255). - Convert the 32-bit floating point values to 8-bit unsigned integers.- `tf.io.read_file`: Read the compressed JPG image back into memory as raw bytes.- `base64.b64encode`: Encode the raw bytes into a base 64 encoded string.
###Code
import base64
import cv2
cv2.imwrite("tmp.jpg", (test_image * 255).astype(np.uint8))
jpg_bytes = tf.io.read_file("tmp.jpg")  # avoid shadowing the built-in `bytes`
b64str = base64.b64encode(jpg_bytes.numpy()).decode("utf-8")
###Output
_____no_output_____
###Markdown
Send the prediction requestOk, now you have a test image. Use this helper function `predict_image`, which takes the following parameters:- `image`: The compressed JPG test image as a base64-encoded string.- `endpoint`: The Vertex AI fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed.- `parameters_dict`: Additional parameters for serving.This function calls the prediction client service `predict` method with the following parameters:- `endpoint`: The Vertex AI fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed.- `instances`: A list of instances (encoded images) to predict.- `parameters`: Additional parameters for serving.To pass the image data to the prediction service, in the previous step you encoded the bytes into base64 -- which makes the content safe from modification when transmitting binary data over the network. You also need to tell the serving binary that the content has been base64 encoded, so it will decode it on the other end.Each instance in the prediction request is a dictionary entry of the form: {serving_input: {'b64': content}}- `serving_input`: the name of the input layer of the underlying model.- `'b64'`: A key that indicates the content is base64 encoded.- `content`: The compressed JPG image bytes as a base64 encoded string.Since the `predict()` service can take multiple images (instances), you will send your single image as a list of one image. As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the `predict()` service.The `response` object returns a list, where each element corresponds to the image at the same position in the request. You will see in the output for each prediction:- `predictions`: Confidence level for the prediction, between 0 and 1, for each of the classes.
###Code
def predict_image(image, endpoint, parameters_dict):
# The format of each instance should conform to the deployed model's prediction input schema.
instances_list = [{serving_input: {"b64": image}}]
instances = [json_format.ParseDict(s, Value()) for s in instances_list]
response = clients["prediction"].predict(
endpoint=endpoint, instances=instances, parameters=parameters_dict
)
print("response")
print(" deployed_model_id:", response.deployed_model_id)
predictions = response.predictions
print("predictions")
for prediction in predictions:
print(" prediction:", prediction)
predict_image(b64str, endpoint_id, None)
###Output
_____no_output_____
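###Markdown
 To turn the raw confidences into a label, you can take the argmax over the ten class scores. The snippet below is a sketch only: the `prediction` values are placeholders, and the class-name ordering assumes the standard CIFAR10 labels.
###Code
import numpy as np
# standard CIFAR10 class ordering (an assumption for this sketch)
CIFAR10_CLASSES = ["airplane", "automobile", "bird", "cat", "deer",
                   "dog", "frog", "horse", "ship", "truck"]
prediction = [0.01] * 9 + [0.91]  # placeholder confidences from the response
print("predicted class:", CIFAR10_CLASSES[int(np.argmax(prediction))])
###Output
_____no_output_____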
###Markdown
Undeploy the `Model` resourceNow undeploy your `Model` resource from the serving `Endpoint` resource. Use this helper function `undeploy_model`, which takes the following parameters:- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed.- `endpoint`: The Vertex AI fully qualified identifier for the `Endpoint` resource where the `Model` is deployed.This function calls the endpoint client service's method `undeploy_model`, with the following parameters:- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed.- `endpoint`: The Vertex AI fully qualified identifier for the `Endpoint` resource where the `Model` resource is deployed.- `traffic_split`: How to split traffic among the remaining deployed models on the `Endpoint` resource.Since this is the only deployed model on the `Endpoint` resource, you can simply leave `traffic_split` empty by setting it to {}.
###Code
def undeploy_model(deployed_model_id, endpoint):
response = clients["endpoint"].undeploy_model(
endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}
)
print(response)
undeploy_model(deployed_model_id, endpoint_id)
###Output
_____no_output_____
###Markdown
Cleaning upTo clean up all GCP resources used in this project, you can [delete the GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Dataset- Pipeline- Model- Endpoint- Batch Job- Custom Job- Hyperparameter Tuning Job- Cloud Storage Bucket
###Code
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex AI fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex AI fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex AI fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex AI fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex AI fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex AI fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex AI fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
###Output
_____no_output_____ |
model/DataScienceProject(House Price Prediction).ipynb | ###Markdown
DATA CLEANING STEP
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
data=pd.read_csv('/Users/chohan/Desktop/ML_DL_ Hackathon/ML_REALTIME PROJECT(A_TO_Z)(real_estate price Prediction)/Bengaluru_House_Data.csv')
data.head()
data.info()
data.describe()
data.isnull().sum()
data.shape
data.groupby("area_type")["area_type"].agg("count")
data1=data.drop(["area_type","availability","society","balcony"],axis="columns")
data1.head()
data1.isnull().sum()
data2=data1.dropna()
data2["size"].unique()
data2["bhk"]=data2["size"].apply(lambda x: int(x.split(" ")[0]))
data2.head()
data3=data2.drop(["size"],axis="columns")
data3.head()
data3["bhk"].unique()
data3[data3.bhk>20]
data3["total_sqft"].unique()
def isfloat(x):
try:
float(x)
except:
return False
return True
data3[~data3["total_sqft"].apply(isfloat)].head()
def Convert_sqft_to_num(x):
token=x.split("-")
if len(token)==2:
return (float(token[0])+float(token[1]))/2
try:
return float(x)
except:
return None
data4=data3.copy()
data4["total_sqft"]=data4["total_sqft"].apply(Convert_sqft_to_num)
data4.head()
###Output
_____no_output_____
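###Markdown
A quick sanity check of the converter (a sketch; values are illustrative): a range like "2100 - 2850" becomes its midpoint, a plain number passes through, and unparseable units return None.
###Code
# expected: 2475.0, 1200.0, None
print(Convert_sqft_to_num("2100 - 2850"))
print(Convert_sqft_to_num("1200"))
print(Convert_sqft_to_num("34.46Sq. Meter"))
###Output
_____no_output_____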
###Markdown
DATA FEATURING STEP
###Code
data4.head()
data5=data4.copy()
data5["price_prsqft"]=data5["price"]*100000/data5["total_sqft"]
data5.head()
data5.location=data5.location.apply(lambda x:x.strip())
location_stats=data5.groupby("location")["location"].agg('count').sort_values(ascending=False)
location_stats
len(location_stats[location_stats<=10])
location_stats_lessthan10=location_stats[location_stats<=10]
location_stats_lessthan10
len(location_stats_lessthan10)
len(data5.location.unique())
data5.location=data5.location.apply(lambda x:"other" if x in location_stats_lessthan10 else x)
len(data5.location.unique())
data5.head()
###Output
_____no_output_____
###Markdown
Outlier Removal Step
###Code
data6=data5[~(data5.total_sqft/data5.bhk<300)]
data6.shape
data6.describe()
def remove_pps_outlier(df):
df_out=pd.DataFrame()
for key,subdf in df.groupby("location"):
m=np.mean(subdf.price_prsqft)
st=np.std(subdf.price_prsqft)
reduce_df=subdf[(subdf.price_prsqft>(m-st))& (subdf.price_prsqft<=(m+st))]
df_out=pd.concat([df_out,reduce_df],ignore_index=True)
return df_out
data7=remove_pps_outlier(data6)
data7.shape
def plot_scatterPlot(df,location):
plt.figure(figsize=(20,10))
bhk2=df[(df.location==location)& (df.bhk==2)]
bhk3=df[(df.location==location)& (df.bhk==3)]
plt.scatter(bhk2.total_sqft,bhk2.price,color="blue",label="bhk2",s=60)
    plt.scatter(bhk3.total_sqft,bhk3.price,marker="+",color="green",label="bhk3",s=60)
plt.xlabel("total square feet area")
plt.ylabel("price")
plt.title(location)
plt.legend()
plot_scatterPlot(data7,"Hebbal")
def remove_bhk_outlier(df):
exclude_indice=np.array([])
for location,location_df in df.groupby("location"):
bhk_stats={}
for bhk,bhk_df in location_df.groupby("bhk"):
bhk_stats[bhk]={
"mean":np.mean(bhk_df.price_prsqft),
"std":np.std(bhk_df.price_prsqft),
"count":bhk_df.shape[0]
}
for bhk,bhk_df in location_df.groupby('bhk'):
stats=bhk_stats.get(bhk-1)
if stats and stats["count"]>5:
exclude_indice=np.append(exclude_indice,bhk_df[bhk_df.price_prsqft<(stats['mean'])].index.values)
return df.drop(exclude_indice,axis='index')
data8=remove_bhk_outlier(data7)
plot_scatterPlot(data8,"Hebbal")
plt.hist(data8.price_prsqft,rwidth=0.8)
plt.xlabel("price per square feet")
plt.ylabel("count")
plt.hist(data8.bath,rwidth=0.8)
plt.xlabel("price per square feet")
plt.ylabel("count")
data8[data8.bath>data8.bhk+2]
data9=data8[data8.bath<data8.bhk+2]
data9.shape
data10=data9.drop(["price_prsqft"],axis="columns")
data10.shape
###Output
_____no_output_____
###Markdown
Model Building Step
###Code
dummies=pd.get_dummies(data10.location)
data11=pd.concat([data10,dummies.drop("other",axis="columns")],axis="columns")
data11.head()
data12=data11.drop("location",axis="columns")
data12.head()
data12.shape
x=data12.drop("price",axis="columns")
x.head()
y=data12.price
y.head()
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=10)
from sklearn.linear_model import LinearRegression
lr_clf=LinearRegression()
lr_clf.fit(x_train,y_train)
lr_clf.score(x_test,y_test)
from sklearn.model_selection import ShuffleSplit,cross_val_score,GridSearchCV
cv=ShuffleSplit(n_splits=5,test_size=0.2,random_state=0)
cross_val_score(LinearRegression(),x,y,cv=cv)
from sklearn.linear_model import Lasso
from sklearn.tree import DecisionTreeRegressor
def find_best_model_usinggridsearchcv(X,y):
algo={
"linear_model":{
"model":LinearRegression(),
"params":{
"normalize":[True,False]
}
},
"lasso":{
"model":Lasso(),
"params":{
"alpha":[1,2],
"selection":["random","cyclic"]
}
},
"descisiontree":{
"model":DecisionTreeRegressor(),
"params":{
"criterion":["mse","friedman_mse"],
"splitter":["best","random"]
}
}
}
score=[]
cv=ShuffleSplit(n_splits=5,test_size=0.2,random_state=0)
for algo_name, config in algo.items():
gs=GridSearchCV(config["model"],config["params"],cv=cv,return_train_score=False)
gs.fit(X,y)
score.append({"model":algo_name,"best_score":gs.best_score_,"best_params":gs.best_params_})
return pd.DataFrame(score,columns=["model","best_score","best_params"])
find_best_model_usinggridsearchcv(x_train,y_train)
def predict_price(location,sqft,bath,bhk):
    # index of the one-hot column for this location (-1 if unseen, e.g. a location grouped into "other")
    matches=np.where(x.columns==location)[0]
    loc_index=matches[0] if len(matches)>0 else -1
    X=np.zeros(len(x.columns))
    X[0]=sqft
    X[1]=bath
    X[2]=bhk
    if loc_index>=0:
        X[loc_index]=1
    return lr_clf.predict([X])[0]
import pickle
with open("banglore_home_price.pickle","wb")as f:
pickle.dump(lr_clf,f)
import json
columns={"data_columns":[col.lower() for col in x.columns]}
with open("columns.json","w") as f:
f.write(json.dumps(columns))
###Output
_____no_output_____ |
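###Markdown
A sketch of how the trained model can be queried (the inputs are illustrative; "Hebbal" is one of the locations used in the scatter plots above).
###Code
# estimated price (in lakhs) for a hypothetical 1000 sqft, 2 bath, 2 BHK home
print(predict_price("Hebbal", 1000, 2, 2))
###Output
_____no_output_____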
python_notes/using_python_for_research_ph256x_harvard/hw/HW1.ipynb | ###Markdown
Using Python for Research Homework: Week 1In this homework, we will use objects, functions, and randomness to find the length of documents, approximate $\pi$, and smooth out random noise. Exercise 1In this five-part exercise, we will count the frequency of each letter in a given string. Exercise 1a- Import the `string` library.- Create a variable `alphabet` that consists of the lowercase and uppercase letters in the English alphabet using the `ascii_letters` data attribute of the `string` library.
###Code
# write your code here!
import string
alphabet = string.ascii_letters
alphabet
###Output
_____no_output_____
###Markdown
Exercise 1b- The lower and upper case letters of the English alphabet should be stored as the string variable `alphabet`.- Consider the sentence 'Jim quickly realized that the beautiful gowns are expensive'. Create a dictionary `count_letters` with keys consisting of each unique letter in the sentence and values consisting of the number of times each letter is used in this sentence. Count upper case and lower case letters separately in the dictionary.
###Code
sentence = 'Jim quickly realized that the beautiful gowns are expensive'
count_letters = {}
# write your code here!
for c in sentence:
if c not in alphabet:
continue
if c in count_letters.keys():
count_letters[c] += 1
else:
count_letters[c] = 1
count_letters
###Output
_____no_output_____
###Markdown
Exercise 1c- Rewrite your code from 1b to make a function called `counter` that takes a string `input_string` and returns a dictionary of letter counts `count_letters`.- Use your function to call `counter(sentence)`.
###Code
# write your code here!
def counter(input_string):
count_letters = {}
for c in input_string:
if c not in alphabet:
continue
if c in count_letters.keys():
count_letters[c] += 1
else:
count_letters[c] = 1
return count_letters
counter(sentence)
###Output
_____no_output_____
###Markdown
Exercise 1d- Abraham Lincoln was a president during the American Civil War. His famous 1863 Gettysburg Address has been stored as `address`. Use the `counter` function from 1c to return a dictionary consisting of the count of each letter in this address and save it as `address_count`.
###Code
address = """Four score and seven years ago our fathers brought forth on this continent, a new nation,
conceived in Liberty, and dedicated to the proposition that all men are created equal. Now we are engaged in a
great civil war, testing whether that nation, or any nation so conceived and so dedicated, can long endure.
We are met on a great battle-field of that war. We have come to dedicate a portion of that field, as a final
resting place for those who here gave their lives that that nation might live. It is altogether fitting and proper
that we should do this. But, in a larger sense, we can not dedicate -- we can not consecrate -- we can not hallow --
this ground. The brave men, living and dead, who struggled here, have consecrated it, far above our poor power to add
or detract. The world will little note, nor long remember what we say here, but it can never forget what they did here.
It is for us the living, rather, to be dedicated here to the unfinished work which they who fought here have thus far so
nobly advanced. It is rather for us to be here dedicated to the great task remaining before us -- that from these honored
dead we take increased devotion to that cause for which they gave the last full measure of devotion -- that we here
highly resolve that these dead shall not have died in vain -- that this nation, under God, shall have a new birth of
freedom -- and that government of the people, by the people, for the people, shall not perish from the earth."""
# Write your code here!
address_count = counter(address)
address_count
###Output
_____no_output_____
###Markdown
Exercise 1f- The frequency of each letter in the Gettysburg Address is already stored as `address_count`. Use this dictionary to find the most common letter in the Gettysburg address.
###Code
# write your code here!
max_count = 0
maxkey = ""
for key in address_count.keys():
if max_count < address_count[key]:
max_count = address_count[key]
maxkey = key
maxkey
###Output
_____no_output_____
###Markdown
Exercise 2Consider a circle inscribed in a square. The ratio of their areas (the ratio of the area of the circle to the area of the square) is $\frac{\pi}{4}$. In this six-part exercise, we will find a way to approximate this value. Exercise 2a- Using the `math` library, calculate and print the value of $\frac{\pi}{4}$
###Code
# write your code here
import math
math.pi / 4
###Output
_____no_output_____
###Markdown
Exercise 2b- Using `random.uniform()`, create a function `rand()` that generates a single float between $-1$ and $1$.- Call `rand()` once. For us to be able to check your solution, we will use `random.seed()` to fix the seed value of the random number generator.
###Code
import random
random.seed(1) # Fixes the seed of the random number generator.
def rand():
# define `rand` here!
return random.uniform(-1, 1)
rand()
###Output
_____no_output_____
###Markdown
Exercise 2c- The distance between two points x and y is the square root of the sum of squared differences along each dimension of x and y. Write a function `distance(x, y)` that takes two vectors as its input and outputs the distance between them. Use your function to find the distance between $x=(0,0)$ and $y=(1,1)$.
###Code
def distance(x, y):
# define your function here!
return math.sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2)
distance((0,0),(1,1))
###Output
_____no_output_____
###Markdown
Exercise 2d- Write a function `in_circle(x, origin)` that determines whether a point in a two dimensional plane falls within a unit circle surrounding a given origin. - Your function should return a boolean `True` if the distance between `x` and `origin` is less than 1 and `False` otherwise. - Use `distance(x, y)` as defined in 2c.- Use your function to determine whether the point (1,1) lies within the unit circle centered at (0,0).
###Code
def in_circle(x, origin = [0,0]):
# Define your function here!
return distance(x, origin) < 1
in_circle((1,1))
###Output
_____no_output_____
###Markdown
Exercise 2e- Create a list `inside` of `R=10000` booleans that determines whether or not a point falls within the unit circle centered at `(0,0)`. - Use the `rand` function from 2b to generate `R` randomly located points. - Use the function `in_circle` to test whether or not a given point falls within the unit circle.- Find the proportion of points that fall within the circle by summing all `True` values in the `inside` list; then divide the answer by `R` to obtain a proportion.- Print your answer. This proportion is an estimate of the ratio of the two areas!
###Code
random.seed(1)
# write your code here!
R = 10000
inside = [in_circle((rand(), rand())) for _ in range(R)]
prop = sum([1 for b in inside if b]) / R
print(prop)
###Output
_____no_output_____
###Markdown
Exercise 2f- Find the difference between your estimate from part 2e and `math.pi / 4`. Note: `inside` and `R` are defined as in Exercise 2e.
###Code
# write your code here!
print(math.pi / 4 - prop)
###Output
_____no_output_____
###Markdown
Exercise 3A list of numbers representing measurements obtained from a system of interest can often be noisy. One way to deal with noise is to smooth the values by replacing each value with the average of the value and the values of its neighbors. Exercise 3a- Write a function `moving_window_average(x, n_neighbors)` that takes a list `x` and the number of neighbors `n_neighbors` on either side of a given member of the list to consider.- For each value in `x`, `moving_window_average(x, n_neighbors)` computes the average of the value and the values of its neighbors.- `moving_window_average` should return a list of averaged values that is the same length as the original list.- If there are not enough neighbors (for cases near the edge), substitute the original value for a neighbor for each missing neighbor.- Use your function to find the moving window average of `x=[0,10,5,3,1,5]` and `n_neighbors=1`.
###Code
def moving_window_average(x, n_neighbors=1):
n = len(x)
width = n_neighbors*2 + 1
x = [x[0]]*n_neighbors + x + [x[-1]]*n_neighbors
# To complete the function,
# return a list of the mean of values from i to i+width for all values i from 0 to n-1.
return [(sum(x[i:i+width]) / width) for i in range(n)]
x = [0,10,5,3,1,5]
print(moving_window_average(x, 1))
###Output
_____no_output_____
###Markdown
Exercise 3b- Compute and store `R=1000` random values from 0-1 as `x`.- Compute the moving window average for `x` for values of `n_neighbors` ranging from 1 to 9 inclusive.- Store `x` as well as each of these averages as consecutive lists in a list called `Y`
###Code
random.seed(1) # This line fixes the value called by your function,
# and is used for answer-checking.
# write your code here!
R = 1000
x = [random.uniform(0, 1) for i in range(R)]
Y = [x] + [moving_window_average(x, i) for i in range(1, 10)]
Y[5][9]
###Output
_____no_output_____
###Markdown
Exercise 3c- For each list in `Y`, calculate and store the range (the maximum minus the minimum) in a new list ranges.- Print your answer. As the window width increases, does the range of each list increase or decrease? Why do you think that is?
###Code
# write your code here!
ranges = [max(l) - min(l) for l in Y]
print(ranges)
###Output
_____no_output_____ |
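###Markdown
As the window width increases, the range of each list decreases: averaging over more neighbors pulls extreme values toward the overall mean, smoothing out the highs and lows.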
notebooks/train_test_generator_from_csv.ipynb | ###Markdown
Magazine category interaction matrix (IM) dump* Load* transform to an IM with multiple ratings in a list* replace cells with the mean of ratings* fill all empty cells with NaN* sample 20% of occupied cells* dump both train and test sets
###Code
import pandas as pd
import numpy as np
import dill
folder_path = "file_server/dataset/ratings"
files = ["Magazine_Subscriptions.csv"]
df = pd.read_csv(folder_path+"/Magazine_Subscriptions.csv", header=None, names=['u_id', 'p_id', 'rating'])
def get_iteraction_matrix_with_mean_ratings(df):
    # one-hot the products, then overwrite the 1s with the actual rating values
    k = pd.get_dummies(df['p_id'])
    k.values[k!=0] = df['rating']
    k.replace(0, np.nan, inplace=True)
    # average duplicate (user, product) ratings by grouping on user
    k = pd.concat([df['u_id'], k], axis=1)
    k = k.groupby('u_id').mean()
    return np.array(k.index), np.array(k.columns), k.values
df = df.iloc[:20000, :]
print(len(df['p_id'].unique()), len(df['u_id'].unique()))
users, items, interaction_matrix = get_iteraction_matrix_with_mean_ratings(df=df)
users.shape, items.shape, interaction_matrix.shape
## Drop products which have no ratings
def get_products_with_no_ratings(s):
col_indices = list()
for i in range(s.shape[1]):
if np.count_nonzero(~np.isnan(s[:,i]))==0:
col_indices.append(i)
print(col_indices)
return col_indices
col_to_remove = get_products_with_no_ratings(interaction_matrix)
interaction_matrix = np.delete(interaction_matrix, col_to_remove, axis=1)
items = np.delete(items, col_to_remove, axis=0)
print(users.shape, items.shape, interaction_matrix.shape)
# sparsity
values_present = np.count_nonzero(~np.isnan(interaction_matrix))
print(values_present)
print(values_present*100/(interaction_matrix.shape[0]*interaction_matrix.shape[1]))
def count_non_na_in_row(row, threshold):
n_ratings = np.count_nonzero(~np.isnan(row))
if n_ratings >= threshold:
return n_ratings, 1
return n_ratings, 0
index_counter = np.empty(0)
ratings_per_user = np.empty(0)
for row in interaction_matrix:
n_ratings, th_cleared = count_non_na_in_row(row, 5)
index_counter = np.append(index_counter, th_cleared)
ratings_per_user = np.append(ratings_per_user, n_ratings)
index_counter, ratings_per_user.mean()
# create train by copying interaction matrix
train = np.copy(interaction_matrix)
# create a nan filled test like train
test = np.empty_like(train)
test[:] = np.nan
import random, math
for i in range(train.shape[0]):
    # sample only if the index_counter value is 1; we don't want to sample and edit rows below the threshold
if index_counter[i] == 1:
# get indices of non missing values in each row
non_nan_map = ~np.isnan(train[i])
non_nan_indices = [ind for ind,_ in enumerate(non_nan_map) if _]
# randomly sample 20% of non missing indices
sample = random.sample(non_nan_indices, math.ceil(0.2*len(non_nan_indices)))
        # move the sampled values to test, and replace them with NaN in train
for k in sample:
test[i,k] = train[i,k]
train[i,k] = np.nan
# slice test and users as per index_counter, which indicates the
# map of users having more than "threshold" ratings
users_of_interest = users[index_counter==1]
test = test[index_counter==1, :]
# should result in 107 products, checking...
print(train.shape, test.shape)
np.count_nonzero(~np.isnan(interaction_matrix[5])), np.count_nonzero(~np.isnan(train[5])), np.count_nonzero(~np.isnan(test[5]))
dump_location = "file_server/processed_data/iteration1/magazine_subscription_subset_115X18241/"
# dump training and test files
with open(dump_location+"train_matrix.pkl", "wb")as fp:
dill.dump(train, fp)
with open(dump_location+"train_users.pkl", "wb")as fp:
dill.dump(users, fp)
with open(dump_location+"items.pkl", "wb")as fp:
dill.dump(items, fp)
with open(dump_location+"test_matrix.pkl", "wb")as fp:
dill.dump(test, fp)
with open(dump_location+"test_users.pkl", "wb")as fp:
dill.dump(users_of_interest, fp)
###Output
_____no_output_____
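###Markdown
A quick round-trip check (a sketch, reusing the same `dump_location`): reload one of the dumps and confirm the shape survived serialization.
###Code
with open(dump_location+"train_matrix.pkl", "rb") as fp:
    train_reloaded = dill.load(fp)
assert train_reloaded.shape == train.shape
print(train_reloaded.shape)
###Output
_____no_output_____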
###Markdown
Scribble space
###Code
np.count_nonzero(~np.isnan(interaction_matrix[5])), np.count_nonzero(~np.isnan(train[5])), np.count_nonzero(~np.isnan(test[5]))
3/11
non_nan_map = ~np.isnan(train[2])
non_nan_indices = [ind for ind,_ in enumerate(non_nan_map) if _]
print(non_nan_indices)
import random, math
random.sample(non_nan_indices, math.ceil(0.2*len(non_nan_indices)))
temp_var = interaction_matrix[:10, :100]
temp_var.shape
indices = np.random.permutation(temp_var.shape[0])
training_idx, test_idx = indices[:int(0.8*len(temp_var))], indices[int(0.8*len(temp_var)):]
training, test = temp_var[training_idx,:], temp_var[test_idx,:]
training.shape, test.shape
users_train, users_test = users[training_idx], users[test_idx]
users_train
temp_df = df.head(10).copy()
temp_df['p_id'][0] = "AOSFI0JEYU4XM"
temp_df['rating'][0] = 4
temp_df
# k = pd.get_dummies(temp_df, columns=['p_id'], prefix='', prefix_sep='')
# names = list(k.columns).pop(1)
# print(names)
# k.groupby(names).mean()
# k
# f = pd.get_dummies(k['p_id'])
# f.values[f!=0] = k['rating']
# f.values[f==0] = np.nan
# f
# # k = pd.get_dummies(temp_df, columns=['p_id'])
k = pd.get_dummies(temp_df['p_id'])
k.values[k!=0] = temp_df['rating']
k.replace(0, np.nan, inplace=True)
k
k = pd.concat([temp_df['u_id'], k], axis=1)
# # names = k.columns #- ['ratings']
# # names
j = k.groupby('u_id').mean()
np.array(j.index), np.array(j.columns), j.values, j
pd.get_dummies(temp_df, columns=['p_id']).groupby(['u_id'], as_index=True).mean()
import pandas as pd
import numpy as np
import torch
import databricks.koalas as ks
from pyspark.sql import SparkSession
sc.stop()
# import findspark
# findspark.init()
import pyspark
from pyspark import SparkConf, SparkContext
def create_spark_context() -> SparkContext:
spark_conf = SparkConf().setMaster("spark://172.16.10.134:7077").setAppName("Spark_processing")
return SparkContext.getOrCreate(spark_conf)
sc = create_spark_context()
from pyspark import SparkContext, SparkConf
def create_spark_context() -> SparkContext:
spark_conf = SparkConf()\
.setMaster("spark://172.16.10.130:7077")\
.setAppName("Spark_Init_Test")\
.set("spark.executor.memory", "12g")
#.set('spark.rpc.message.maxSize', 300)\
return SparkContext.getOrCreate(spark_conf)
sc = create_spark_context();
sqlcontext = pyspark.SQLContext(sc)
spark = SparkSession.builder.getOrCreate()
spark
folder_path = "file_server/dataset/ratings/"
# files = os.listdir(folder_path)
# print(files)
files = ['Magazine_Subscriptions.csv']
df = pd.read_csv(folder_path+"/"+files[0], header=None, names=['u_id', 'p_id', 'rating'])
sdf = sqlcontext.createDataFrame(df)
sdf.show()
ks.set_option('compute.default_index_type', 'distributed-sequence')
kdf = sdf.to_koalas()
kdf
temp_df = kdf.head(10)
temp_df
l = temp_df.groupby(['u_id','p_id']).size().reset_index().rename(columns={0:'count'})
print(len(ks.unique(temp_df['u_id'])), len(ks.unique(temp_df['p_id'])))
print(len(l))
###Output
_____no_output_____ |
archives/03_Feature_Selection.ipynb | ###Markdown
If you fail to control your datetime typing, you'll inevitably end up with difficulty in aligning and joining data on date, like this:
###Code
# example of a str and a datetime repr which are joined on axis=1 and result in an awkward dataframe
###Output
_____no_output_____
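###Markdown
A minimal sketch of the failure mode (synthetic data): one frame indexed by date strings, one by real datetimes. The concat can't align them, so you get a doubled index full of NaNs.
###Code
import pandas as pd
a = pd.DataFrame({'x': [1, 2]}, index=['2018-01-01', '2018-01-02'])  # str index
b = pd.DataFrame({'y': [3, 4]}, index=pd.to_datetime(['2018-01-01', '2018-01-02']))  # datetime index
print(pd.concat([a, b], axis=1))  # 4 rows, half NaN -- the indices never match
###Output
_____no_output_____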
###Markdown
Among the pandas date/time functions is a very useful resampling method, which allows you to aggregate from a higher frequency (e.g., hourly) to a lower frequency (e.g., daily, weekly, or monthly). Depending on the timeframe of your strategy, you may want to resample everything to a lower frequency, as sketched below.
###Code
## example of resampling
###Output
_____no_output_____
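###Markdown
A minimal resampling sketch on synthetic daily data, aggregating to weekly means:
###Code
import pandas as pd
import numpy as np
daily = pd.Series(np.arange(10.), index=pd.date_range('2018-01-01', periods=10, freq='D'))
print(daily.resample('W').mean())  # daily -> weekly average
###Output
_____no_output_____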
###Markdown
The other main typing issue I find is with numeric types. Number values are commonly represented as integers, floats, and strings which look like integers or floats. Pandas attempts to guess the right type for data when it's loaded (via `read_csv` or `read_sql` etc..). Problems arise when some values within a column don't follow the inferred type. The below example illustrates how a single non-numeric value prevents numeric operations, and how `pd.to_numeric` repairs the column:
###Code
df = pd.DataFrame({'symbol':['a','b','c','d','e'],'price':[1,2,3,4,'None']})
print(df)
print()
print('Average: ',df.mean()) # no results
print()
print('######################')
# retype to numeric
print()
df['price'] = pd.to_numeric(df.price,errors='coerce')
print(df)
print()
print('Average: ',df.mean()) # works
###Output
symbol price
0 a 1
1 b 2
2 c 3
3 d 4
4 e None
Average: Series([], dtype: float64)
######################
symbol price
0 a 1.0
1 b 2.0
2 c 3.0
3 d 4.0
4 e NaN
Average: price 2.5
dtype: float64
###Markdown
Handling Missing DataIncomplete data is a reality for us all. Whether it's because some input sources are of a lower frequency, shorter history (i.e., don't go back as far in time) or have unexplained unavailable data points at times, we need a thoughtful approach for addressing missing data.Most machine learning algorithms require a valid value for each feature at each observation point (or they will fail to run...). If we don't apply some sensible workarounds, we'll end up dropping lots of _valid_ data points because of a single missing feature. Before outlining the tactics and code patterns we can apply, my core principles for data cleansing are:1. Always try to reflect the data you might have applied _at the time_ of the missing data point. In other words, don't peek into the future if at all possible. 2. Drop valid data only as a last resort (and as late in the process as possible). 3. Questionable data (i.e., extreme outliers) should be treated like missing data.
###Code
### Formatting
###Output
_____no_output_____
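###Markdown
A small sketch of the first principle (synthetic data): a forward-fill only uses values known at the time, while filling with the full-series mean peeks into the future.
###Code
import pandas as pd
import numpy as np
s = pd.Series([1.0, np.nan, 3.0, np.nan])
print(s.ffill())           # carries forward the last observed value -- no peeking
print(s.fillna(s.mean()))  # uses the whole series' mean -- peeks into the future
###Output
_____no_output_____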
###Markdown
Whew! That was (much) longer than intended. Feature engineering is a broad subject of which I've only scratched the surface. Hopefully this will provide you with a framework and starting point to get your own process up and running so that you can focus on applying your creativity and your expertise on the subject matter of choice.In the next post of this series, I will outline a process [feature selection]() - the next logical step following feature engineering. Questions, comments, or suggestions are welcomed below.
###Code
import numpy as np
arrays = [np.array([1,2,3,4,1,2,3,4]),np.array(['bar', 'bar', 'bar', 'bar', 'foo', 'foo', 'foo', 'foo'])]
s = pd.Series(np.array([100,101,102,103,200,201,202,203]), index=arrays)
s.name='values'
df = pd.DataFrame(s, index=arrays).sort_index()
df.index.names =['day','symbol']
print(df)
print(df.groupby(level='symbol').values.diff())
print(df.groupby(level='symbol').values.pct_change())
my_func = lambda x: x.pct_change()
print(df.groupby(level='symbol').values.apply(my_func))
print(df.groupby(level='symbol').values.diff() / df.groupby(level='symbol').values.shift(1))
###Output
values
day symbol
1 bar 100
foo 200
2 bar 101
foo 201
3 bar 102
foo 202
4 bar 103
foo 203
day symbol
1 bar NaN
foo NaN
2 bar 1.0
foo 1.0
3 bar 1.0
foo 1.0
4 bar 1.0
foo 1.0
Name: values, dtype: float64
day symbol
1 bar NaN
foo 1.000000
2 bar -0.495000
foo 0.990099
3 bar -0.492537
foo 0.980392
4 bar -0.490099
foo 0.970874
Name: values, dtype: float64
day symbol
1 bar NaN
foo NaN
2 bar 0.010000
foo 0.005000
3 bar 0.009901
foo 0.004975
4 bar 0.009804
foo 0.004950
Name: values, dtype: float64
day symbol
1 bar NaN
foo NaN
2 bar 0.010000
foo 0.005000
3 bar 0.009901
foo 0.004975
4 bar 0.009804
foo 0.004950
Name: values, dtype: float64
###Markdown
How-To Guide into Feature Selection IntroductionThis is the third post in my series on transforming data into alpha. If you haven't yet see the [framework overview]() or [feature engineering guide](), please take a minute to read that first... This post is going to delve into the mechanics of _feature selection_, in other words choosing between the many variations of features you've created in the feature engineering stage. By design, many of the features you've created will be very similar to each other (aka "collinear") because you've derived them from the same underlying dataset. MotivationThe previous step of the process, feature engineering, is intended to be a creative, loose process akin to a brainstorming session. The result should be tens (or hundreds) of variations of features to evaluate. However, most models will _generalize_ better (i.e., work well on data they haven't seen) with fewer features. They will also be much more interpretable. Therefore, we need a systematic approach to deciding which of the many posible features to use. That's where the _feature selection_ process comes in. PhilosophyIn feature selection, we strive to meet two goals:1. __Strength__: Choose the features with the strongest, most persistent relationships to the target outcome variable. The reasons for this are obvious.2. __Orthogonality__: Minimize the amount of overlap or collinearity in your selected features. The importance of orthogonality (non-overlap) of features is much greater than you might guess. I am biased towards making feature selection a relatively mechanical process. The "art" should mainly be encapsulated within the prior step (feature engineering) and the subsequent step (modeling). Feature selection should, in my view, follow a heuristic and can be encoded into an algorithm if desired. For purposes of this tutorial, I'll keep things relatively manual. Getting StartedLet's dive in. I will begin by loading the feature set created in the prior step. I'm also going to create the _outcomes_ `DataFrame` as done in the Framework Overview post. Please refer to those if you haven't already.
###Code
import numpy as np
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like # remove once updated pandas-datareader issue is fixed
# https://github.com/pydata/pandas-datareader/issues/534
import pandas_datareader.data as web
%matplotlib inline
def get_symbols(symbols,data_source, begin_date=None,end_date=None):
out = pd.DataFrame()
for symbol in symbols:
df = web.DataReader(symbol, data_source,begin_date, end_date)[['AdjOpen','AdjHigh','AdjLow','AdjClose','AdjVolume']].reset_index()
df.columns = ['date','open','high','low','close','volume'] #my convention: always lowercase
df['symbol'] = symbol # add a new column which contains the symbol so we can keep multiple symbols in the same dataframe
df = df.set_index(['date','symbol'])
out = pd.concat([out,df],axis=0) #stacks on top of previously collected data
return out.sort_index()
prices = get_symbols(['AAPL','CSCO','AMZN','YHOO','MSFT'],data_source='quandl',begin_date='2012-01-01',end_date=None)
prices.sort_index().tail()
outcomes = pd.DataFrame(index=prices.index)
# next day's opening change
outcomes['close_1'] = prices.groupby(level='symbol').close.pct_change(-1) # next day's returns
outcomes['close_5'] = prices.groupby(level='symbol').close.pct_change(-5) # next week's returns
outcomes['close_10'] = prices.groupby(level='symbol').close.pct_change(-10) # next two weeks' returns
outcomes['close_20'] = prices.groupby(level='symbol').close.pct_change(-20) # next month's (approx) returns
outcomes.tail()
###Output
_____no_output_____
###Markdown
For purposes of illustration, we'll engineer some features to contain some signal buried within the noise. Clearly, this is not something we'd do in real usage but will help to demonstrate the concept more clearly. Assume we have a target variable called `outcome` which can be (partially) predicted with three factors, `factor_1`, `factor_2` and `factor_3`. There's also an unpredictable noise component. We will "cheat" and create the overall target variable from these factors. All data will follow the same index as the market data we pulled from quandl.
###Code
num_obs = prices.close.count()
factor_1 = pd.Series(np.random.randn(num_obs),index=prices.index)
factor_2 = pd.Series(np.random.randn(num_obs),index=prices.index)
factor_3 = pd.Series(np.random.randn(num_obs),index=prices.index)
outcome = 1.*factor_1 + 2.*factor_2 + 3.*factor_3 + 5.*np.random.randn(num_obs)
outcome.name = 'outcome'
outcome.tail()
###Output
_____no_output_____
###Markdown
Now, we will engineer several variations on features which each contain some information about the three factors, plus a few which contain some interaction effects, and some which do not contain any useful data. Note that we are, again, "cheating" here for illustration purposes.
###Code
features = pd.DataFrame(index=outcome.index)
features['f11'] = 0.2*factor_1 + 0.8*np.random.randn(num_obs)
features['f12'] = 0.4*factor_1 + 0.6*np.random.randn(num_obs)
features['f13'] = 0.6*factor_1 + 0.4*np.random.randn(num_obs)
features['f21'] = 0.2*factor_2 + 0.8*np.random.randn(num_obs)
features['f22'] = 0.4*factor_2 + 0.8*np.random.randn(num_obs)
features['f23'] = 0.6*factor_2 + 0.4*np.random.randn(num_obs)
features['f31'] = 0.2*factor_3 + 0.8*np.random.randn(num_obs)
features['f32'] = 0.4*factor_3 + 0.6*np.random.randn(num_obs)
features['f33'] = 0.6*factor_3 + 0.4*np.random.randn(num_obs)
features['f41'] = 0.2*factor_1+0.2*factor_2 + 0.6*np.random.randn(num_obs)
features['f42'] = 0.2*factor_2+0.2*factor_3 + 0.6*np.random.randn(num_obs)
features['f43'] = 0.2*factor_3+0.2*factor_1 + 0.6*np.random.randn(num_obs)
features['f51'] = np.random.randn(num_obs)
features['f52'] = np.random.randn(num_obs)
features['f53'] = np.random.randn(num_obs)
features.tail()
###Output
_____no_output_____
###Markdown
Next, we'll import the required packages and modules for the feature selection:
###Code
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import display
from scipy.cluster import hierarchy
from scipy.spatial import distance
from sklearn.preprocessing import StandardScaler,Normalizer
###Output
_____no_output_____
###Markdown
Before evaluating the features for predictive strength and orthogonality, we'll do a quick data preparation stage. It is sometimes vital to "standardize" or "normalize" data so that we get fair comparisons between features of differing scale. Strictly speaking, since all of the doctored outcome and feature data is already drawn from a normal distribution (using the numpy function `np.random.randn()`) we don't really need this step, but it's good practice to include. Here, I'll use the scikit-learn `StandardScaler()` class and some pandas magic to transform the data.
###Code
#f = features.dropna() #optional - to compare apples to apples
# standardize or normalize data
std_scaler = StandardScaler()
features_scaled = std_scaler.fit_transform(features.dropna())
print (features_scaled.shape)
df = pd.DataFrame(features_scaled,index=features.dropna().index)
df.columns = features.dropna().columns
df.tail()
# standardize outcome as well
outcome_df = outcome.to_frame()
outcome_scaled = std_scaler.fit_transform(outcome_df.dropna())
outcome_scaled = pd.DataFrame(outcome_scaled,index=outcome_df.dropna().index)
outcome_scaled.columns = outcome_df.columns
outcome_scaled.tail()
corr = df.corrwith(outcome)
corr.sort_values().plot.barh(color = 'blue',title = 'Strength of Correlation')
###Output
_____no_output_____
###Markdown
Pretend for a minute that we don't know which features are going to be stronger and weaker, and which are going to tend to cluster together. We've got an idea that there are some quite strong features, some weaker, and some useless. Next, we'll take advantage of a very handy seaborn chart type called a "clustermap" which plots a heatmap representation of a correlation matrix and runs a clustering algorithm to group together the most closely related features. Of course, the diagonal of dark green represents each feature being perfectly correlated with itself.
###Code
corr_matrix = df.corr()
correlations_array = np.asarray(corr_matrix)
linkage = hierarchy.linkage(distance.pdist(correlations_array), method='average')
g = sns.clustermap(corr_matrix,row_linkage=linkage,col_linkage=linkage,row_cluster=True,\
col_cluster=True,figsize=(10,10),cmap='Greens')
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.show()
label_order = corr_matrix.iloc[:,g.dendrogram_row.reordered_ind].columns
###Output
_____no_output_____
###Markdown
The algorithm has done a good job of finding the groupings of features. The cluster in the upper left captures `factor_1` (including some of the interaction effects). `factor_3` is fairly well isolated in the lower right corner, and in the middle we can see `factor_2` as well as some of the noise features. Let's next focus only on those features with correlations greater than 0.1 to exclude the noise and weak features.
###Code
correlated_features = corr[corr>0.1].index.tolist()
corr_matrix = df[correlated_features].corr()
correlations_array = np.asarray(corr_matrix)
linkage = hierarchy.linkage(distance.pdist(correlations_array), method='average')
g = sns.clustermap(corr_matrix,row_linkage=linkage,col_linkage=linkage,row_cluster=True,\
col_cluster=True,figsize=(6,6),cmap='Greens')
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.show()
label_order = corr_matrix.iloc[:,g.dendrogram_row.reordered_ind].columns
print("Correlation Strength:")
print(corr[corr>0.1].sort_values(ascending=False))
###Output
_____no_output_____
###Markdown
Ah, now the clusters look a bit sharper. We'll follow a simple heuristic to manually select the features. Those wishing to take this to the next level can decide how to encapsulate it into an algorithm (a greedy sketch follows the selection below). 1. Take the most strongly correlated feature (f33) and add it to our list of selected features.2. Take the second most strongly correlated feature (f23) and check to see if it's closely correlated (neighboring in the clustermap) to any features already chosen. If no, add to the list. If yes, discard.3. Repeat this process until either (1) we've reached the target feature count, or (2) we've run out of strongly correlated features. Following that heuristic, I get:
###Code
selected_features = ['f33','f23','f42','f41','f31']
###Output
_____no_output_____
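###Markdown
As promised, here is one possible greedy encoding of the heuristic (a sketch: the 0.3 pairwise-correlation cutoff and the feature count are assumptions, not tuned values).
###Code
def greedy_select(frame, corr_with_target, max_feats=5, max_pairwise=0.3):
    # walk features from strongest to weakest correlation with the target,
    # keeping one only if it isn't too correlated with anything already kept
    ranked = corr_with_target.sort_values(ascending=False).index
    picked = []
    for feat in ranked:
        if all(abs(frame[feat].corr(frame[p])) < max_pairwise for p in picked):
            picked.append(feat)
        if len(picked) == max_feats:
            break
    return picked

print(greedy_select(df, corr[corr > 0.1]))
###Output
_____no_output_____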
###Markdown
Note that this list of features is not simply the highest correlated features. Let's run the clustermap one more time to see if we've missed any major clusters.
###Code
corr_matrix = df[selected_features].corr()
correlations_array = np.asarray(corr_matrix)
linkage = hierarchy.linkage(distance.pdist(correlations_array), method='average')
g = sns.clustermap(corr_matrix,row_linkage=linkage,col_linkage=linkage,row_cluster=True,\
col_cluster=True,figsize=(6,6),cmap='Greens')
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.show()
label_order = corr_matrix.iloc[:,g.dendrogram_row.reordered_ind].columns
import seaborn as sns
sns.pairplot(df[selected_features],size=1.5)
###Output
_____no_output_____
###Markdown
Looks generally pretty good. This can be a bit subjective to determine what's "too close" and what's "too weak", but that's the basic idea. Thus far, we've only taken a simple correlation statistic to be representative of predictive power. In my opinion, that's a good place to start but because financial time series data suffers from [non-stationarity]() and [regime change](), we'll plot the rolling correlation of these selected features to see if any is either (1) less correlated now than in times past or (2) very "hot-and-cold".
###Code
tmp = df[selected_features].join(outcome_scaled).reset_index().set_index('date')
tmp.dropna().resample('Q').apply(lambda x: x.corr()).iloc[:,-1].unstack().iloc[:,:-1].plot()
# shows time stability
###Output
_____no_output_____
###Markdown
As expected, since the data wasn't modeled with any non-stationarity, our features all appear to be robust over time. Z-ScoresA very popular/useful transformation for financial time series data is the [z-score](http://stattrek.com/statistics/dictionary.aspx?definition=z-score). We can easily define a generalized lambda function for this, which we can use whenever needed. Importantly, it allows us to mix together very different symbols (some high-beta, some low-beta) in a way that considers the statistical significance of any movement.
###Code
zscore_fxn = lambda x: (x - x.mean()) / x.std()
features['f09'] =prices.groupby(level='symbol').close.apply(zscore_fxn)
features.f09.unstack().plot.kde(title='Z-Scores (not quite accurate)')
###Output
_____no_output_____
###Markdown
However, the above example has a subtle but important bug. It uses the mean _of the whole time frame_ and the standard deviation _of the whole time frame_ to calculate each datapoint. This means we are peeking ahead into the future and the feature is potentially very danger-prone (it'll work famously well in sample and fail to work out of sample...).Fixing this is cumbersome, but necessary.
###Code
zscore_fun_improved = lambda x: (x - x.rolling(window=200, min_periods=20).mean())/ x.rolling(window=200, min_periods=20).std()
features['f10'] =prices.groupby(level='symbol').close.apply(zscore_fun_improved)
features.f10.unstack().plot.kde(title='Z-Scores (Correct)')
###Output
_____no_output_____
###Markdown
PercentileLess commonly used - but equally useful - is the percentile transformation. Getting this done properly in pandas (with groupby and rolling) is possible but tricky. The below example returns the percentile rank (from 0.00 to 1.00) of traded volume for each value as compared to a trailing 200 day period. Note that we need to use _a lambda within a lambda_ to make this work properly. We're on the bleeding edge.
###Code
# rank the *most recent* value within its trailing window (.iloc[-1], not [0], which would rank the oldest value)
rollrank_fxn = lambda x: x.rolling(200,min_periods=20).apply(lambda x: pd.Series(x).rank(pct=True).iloc[-1],raw=True)
features['f11'] = prices.groupby(level='symbol').volume.apply(rollrank_fxn)
###Output
_____no_output_____
###Markdown
Another interesting application of this same pattern is to rank each stock _cross-sectionally_ rather than _longitudinally_ as above. In other words, where does this stock rank within all of the stocks on that day, not for all prior days of that stock. The below example isn't very meaningful with only a handful of stocks, but quite useful when using a realistic universe. In this example, we're also making use of an earlier feature (relative volume) to compare which symbol is most heavily traded _for that stock's normal range_ in a given day. Also note that we need to `dropna()` prior to ranking because `rank` doesn't handle nulls very gracefully.
###Code
features['f12'] = features['f07'].dropna().groupby(level='date').rank(pct=True)
###Output
_____no_output_____
###Markdown
Technical AnalysisThose with a taste for technical analysis may find it difficult to let go of your favored TA techniques. While this is not _my_ favored approach, you'll have no problem engineering features using these methods. From my cursory googling, it looked as though the `ta` package would be a good place to start. Very new and only one contributor but it looks fairly complete and well documented. If you find that it's missing your favorite indicators, consider contributing to the package. If you know of better such packages, please post in the comments below... You may consider mean-centering a technical indicator so that machine learning methods can make better use of the data (or make sure to include that in the pre-processing pipeline when you start modeling).
###Code
import ta # technical analysis library: https://technical-analysis-library-in-python.readthedocs.io/en/latest/
# money flow index (14 day)
features['f13'] = ta.momentum.money_flow_index(prices.high, prices.low, prices.close, prices.volume, n=14, fillna=False)
# mean-centered money flow index
features['f14'] = features['f13'] - features['f13'].rolling(200,min_periods=20).mean()
###Output
_____no_output_____
###Markdown
Alternative RepresentationsA bit different than transforms are "representations", i.e., other ways to represent continuous values. All of the transforms above returned continuous values rather than "labels", and that's often a good place to start - especially for early prototypes.However, you may want to represent the data in different ways, especially if using classification-based approaches or worried about the [curse of dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality) due to large numbers of features. BinningWe can easily convert a continuous variable to discrete "bins" (like 1 to 10). This loses information, of course, but sometimes loss of information is a good thing if you are removing more noise than signal. The below example shows volumes converted into ten equally sized buckets. In other words, we've converted a continuous variable into a discrete one. NOTE: this example is not applied in a rolling fashion, so it __does suffer from some data peeking__, a cardinal sin. At the moment, I'm failing in my efforts to implement it in a rolling way. I'd be grateful for code snippets if anyone knows how to do this offhand (one rolling approximation is sketched after the code below).
###Code
n_bins = 10
bin_fxn = lambda y: pd.qcut(y,q=n_bins,labels = range(1,n_bins+1))
features['f15'] = prices.volume.groupby(level='symbol').apply(bin_fxn)
###Output
_____no_output_____
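###Markdown
Here is one rolling approximation (a sketch, not a drop-in `qcut` replacement): bucket each value by its percentile rank within the trailing window, which avoids peeking ahead.
###Code
rolling_bin_fxn = lambda y: y.rolling(200, min_periods=20).apply(
    lambda w: np.ceil(pd.Series(w).rank(pct=True).iloc[-1] * n_bins), raw=True)
features['f15_rolling'] = prices.volume.groupby(level='symbol').apply(rolling_bin_fxn)
###Output
_____no_output_____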
###Markdown
SignVery simply, you may wish to convert continuous variables into positive or negative (1 or -1) values, depending on input. For instance, was volume increasing or decreasing today?
###Code
features['f16'] = features['f05'].apply(np.sign)
###Output
_____no_output_____
###Markdown
Plus-MinusYou may be interested in how many days in a row a value has increased (or decreased). Below is a simple pattern to do just that - it calculates the number of up-days minus the number of down days.
###Code
plus_minus_fxn = lambda x: x.rolling(20).sum()
features['f17'] = features['f16'].groupby(level='symbol').apply(plus_minus_fxn)
###Output
_____no_output_____
###Markdown
One-Hot EncodingPossibly the most frequently used alternative representation is "one-hot encoding" where a categorical variable is represented as a binary. For instance, month_of_year would be represented as twelve different columns, each of which was either 0 or 1. January would be [1,0,0,0,...0] etc... This is absolutely crucial in a few circumstances. The first is where there is false meaning in the "ordinality" of values. If we were looking to test the "santa claus effect" hypothesis, it wouldn't be helpful to use a month_of_year feature where January was "the least" and December was "the most". The second is in cases where we are representing events or "states". Does the word "lawsuit" appear within the 10-Q footnotes? Is the company in the blackout period for share buybacks? Finally, the particular machine learning algorithm (tree-based, neural networks) may find it easier to use binary representations than continuous or discrete ones. The below example creates twelve one-hot features, one for each month, and names them automatically
###Code
month_of_year = prices.index.get_level_values(level='date').month
one_hot_frame = pd.DataFrame(pd.get_dummies(month_of_year))
one_hot_frame.index = prices.index # Careful! This is forcing index values without usual pandas alignments!
# create column names
begin_num = int(features.columns[-1][-2:]) + 1 #first available feature
feat_names = ['f'+str(num) for num in list(range(begin_num,begin_num+12,1))]
# rename columns and merge
one_hot_frame.columns = feat_names
features = features.join(one_hot_frame)
###Output
_____no_output_____
###Markdown
Data CleansingOK, I've put this off long enough. It's time to cover the least interesting and possibly most critical aspect of feature engineering... data cleansing! Many will include data cleansing as part of the raw data collection pipeline rather than the feature engineering step - and I can't argue with cleansing data as early in the process as possible. However, your data can never be too clean so I take the "belt and suspenders" approach. Clean your data on collection, clean on usage. Clean, clean, clean! The main tools here are `to_datetime`, `to_numeric`, and `astype()` (int, string, float...) for typing, plus `fillna()` (ffill, 0, mean) for missing values. Data TypingIf you've spent any time with data work in python, you're already familiar with the sometimes annoying data typing issues of a "duck typed" language. Pandas does an admirable job of inferring types from your data but you'll sometimes want to exercise more control to make sure your data is perfect. The first data typing issue I face is representation of dates and times, which can be represented in several different formats. I prefer to standardize all datetimes using the pandas pd.to_datetime() method which yields two main benefits: (1) you will be able to align and join multiple datetime values together and (2) you'll be able to take advantage of the many pandas date/time functions.Example:
###Code
## code of casting to datetime, selecting weekday etc...
###Output
_____no_output_____ |
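###Markdown
A minimal sketch of both casts on synthetic data:
###Code
import pandas as pd
raw = pd.DataFrame({'date': ['2018-01-05', '2018-01-08'], 'price': ['100', '101.5']})
raw['date'] = pd.to_datetime(raw['date'])                    # standardize datetimes
raw['price'] = pd.to_numeric(raw['price'], errors='coerce')  # force numeric typing
raw['weekday'] = raw['date'].dt.day_name()                   # date functions now work
print(raw.dtypes)
###Output
_____no_output_____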
examples/SpanishExamples/TabuEjemplo.ipynb | ###Markdown
Tabu SearchThe **Pyristic** library includes a class called `TabuSearch` that makes it easy to implement a Tabu Search based metaheuristic for solving minimization problems. To use this class it is necessary to:1. Define: * The objective function $f$. * The list of constraints. * A data structure (optional).2. Create a class that inherits from `TabuSearch`.3. Override the following functions of the `TabuSearch` class: * get_neighbors (required) * encode_change (required)The libraries and elements that must be imported are shown below. Afterwards, two combinatorial optimization problems will be solved using the `TabuSearch` class.
###Code
from pyristic.heuristic.Tabu_search import TabuSearch
from pyristic.utils.helpers import *
from pprint import pprint
import numpy as np
import copy
###Output
_____no_output_____
###Markdown
Knapsack problem\begin{equation} \label{eq:KP} \begin{array}{rll} \text{maximize:} & f(\vec{x}) = \sum_{i=1}^{n} p_i \cdot x_{i} & \\ \text{subject to: } & g_1(\vec{x}) = \sum_{i=1}^{n} w_i \cdot x_{i} \leq c & \\ & x_i \in \{0,1\} & i\in\{1,\ldots,n\}\\ \end{array}\end{equation}Consider the following input:- $n = 5$- $p = \{5, 14, 7, 2, 23\}$- $w = \{2, 3, 7, 5, 10\}$- $c = 15$The best solution is:$x = [1, 1, 0, 0, 1]$ , $f(x) = 42$ and $g_{1}(x) = 15$ Objective function Since the `TabuSearch` class assumes minimization problems, the knapsack problem must be converted into a minimization problem. To do this, the value of the objective function is multiplied by -1.
###Code
def f(x : np.ndarray) -> float:
p = np.array([5,14,7,2,23])
return -1*np.dot(x,p)
###Output
_____no_output_____
###Markdown
Constraints The constraints are defined in separate functions and added to a list.
###Code
def g1(x : np.ndarray) -> bool:
w = [2,3,7,5,10]
return np.dot(x,w) <= 15
constraints_list= [g1]
###Output
_____no_output_____
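###Markdown
As a quick sanity check, we can evaluate the known best solution $x = [1, 1, 0, 0, 1]$ with the functions defined above:
###Code
x_best = np.array([1, 1, 0, 0, 1])
print(f(x_best))   # -42: the maximization value 42, negated
print(g1(x_best))  # True: the total weight is 15 <= 15
###Output
_____no_output_____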
###Markdown
For the knapsack problem we only need to check that the weight is not exceeded. Using `TabuSearch`To use the Tabu Search metaheuristic implemented in the **Pyristic** library, it is necessary to create a class that inherits from the `TabuSearch` class.
###Code
class Knapsack_solver(TabuSearch):
def __init__(self, f_ : function_type , constraints_: list):
super().__init__(f_,constraints_)
def get_neighbors(self, x : np.ndarray,**kwargs) -> list:
neighbors_list = []
for i in range(len(x)):
x[i] ^= 1 # flip bit i to generate a neighboring solution
neighbors_list+=[copy.deepcopy(x)]
x[i] ^= 1 # flip bit i back to restore x
return neighbors_list
def encode_change(self, neighbor : (list,np.ndarray), x : (list,np.ndarray),**kwargs) -> list: # return [position, new value] of the flipped bit
x_ = [None,None]
for i in range(len(x)):
if x[i] != neighbor[i]:
return [i,neighbor[i]]
return x_
###Output
_____no_output_____
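###Markdown
The neighborhood defined above flips one bit at a time, so a 5-bit solution has exactly five neighbors. A small illustration (this cell is only illustrative and is not needed later):
###Code
demo_solver = Knapsack_solver(f, [g1])
pprint(demo_solver.get_neighbors(np.array([0, 1, 0, 0, 0])))
###Output
_____no_output_____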
###Markdown
The new class is called *Knapsack_solver*, and in it the `get_neighbors` and `encode_change` methods have been overridden. If these methods are not implemented, the algorithm will not work. Running the metaheuristic Once the *Knapsack_solver* class is defined, an object of type *Knapsack_solver* is created, passing the objective function and the problem constraints as parameters. Here we call the created object *Knapsack*.
###Code
Knapsack = Knapsack_solver(f, [g1])
###Output
_____no_output_____
###Markdown
Finally, the `optimize` function is called. This function takes three parameters:* An initial solution, or a function that generates initial solutions.* The number of iterations.* The time during which a change at a given position is forbidden (tabu time).For this example we use an empty knapsack ($x_0 = [0,0,0,0,0]$), $30$ iterations and a tabu time equal to $3$.
###Code
init_backpack_solution = np.zeros(5,dtype=int)
'''Parameters:
Initial solution
Number of iterations
Tabu time
'''
Knapsack.optimize(init_backpack_solution,30,3)
print(Knapsack)
###Output
Tabu search:
f(X) = -42
X = [1 1 0 0 1]
###Markdown
Next we solve the same problem on a larger instance. We have to redefine the objective function and the constraint so they can be used with any instance of the problem.We define the following global variables:* n is a number indicating the size of our instance.* p is an array with the profit provided by each object.* w is an array with the weight of each object.* c is the maximum weight our knapsack can carry.
###Code
n = 50
p = [60, 52, 90, 57, 45, 64, 60, 45, 63, 94, 44, 90, 66, 64, 32, 39, 91, 40, 73, 61, 82, 94, 39, 68, 94, 98, 80, 79, 73, 99, 49, 56, 69, 49, 82, 99, 65, 34, 31, 85, 67, 62, 56, 38, 54, 81, 98, 63, 48, 83]
w = [38, 20, 21, 21, 37, 28, 32, 30, 33, 35, 29, 32, 35, 24, 28, 29, 22, 34, 31, 36, 36, 28, 38, 25, 38, 37, 20, 23, 39, 31, 27, 20, 38, 38, 36, 28, 39, 22, 23, 22, 21, 24, 23, 33, 31, 30, 32, 30, 22, 37]
c = 870
def f(x : np.ndarray) -> float:
global p
return -1* np.dot(x,p)
def g1(x : np.ndarray) -> bool:
global w,c
result = np.dot(x,w)
g1.__doc__="{} <= {}".format(result,c)
return result <= c
constraints_list= [g1]
###Output
_____no_output_____
###Markdown
Initial solutionIn the previous example the initial solution was an empty knapsack. Now we create a knapsack that inserts objects at random, as long as the knapsack capacity is not exceeded.
###Code
def getInitialSolution(NumObjects=5):
global n,p,w,c
#Empty backpack
x = [0 for i in range(n)]
weight_x = 0
#Random order to insert objects.
objects = list(range(n))
np.random.shuffle(objects)
for o in objects[:NumObjects]:
#Check the constraint about capacity.
if weight_x + w[o] <= c:
x[o] = 1
weight_x += w[o]
return np.array(x)
###Output
_____no_output_____
###Markdown
We define our *Knapsack_solver* object and call the `optimize` method with the following parameters:* The function that creates the initial solution.* $100$ iterations.* A tabu time of $\frac{n}{2}$.
###Code
Knapsack_2 = Knapsack_solver(f, [g1])
Knapsack_2.optimize(getInitialSolution,100,n//2)
print(Knapsack_2)
###Output
Tabu search:
f(X) = -2207
X = [0 0 1 0 0 1 0 0 0 1 0 1 1 0 0 0 1 1 1 0 1 1 0 1 1 1 1 1 1 1 1 0 1 0 1 1 1
0 0 1 1 0 0 0 1 1 1 0 0 1]
Constraints:
856 <= 870
###Markdown
To examine the behavior of the metaheuristic on a given problem, the **Pyristic** library provides a function called `get_stats`. This function is found in **utils.helpers** and takes as parameters:* The object created to run the metaheuristic.* The number of times the metaheuristic should be executed.* The arguments taken by the `optimize` function (must be a tuple).The `get_stats` function returns a dictionary with some statistics of the runs.
###Code
args = (getInitialSolution,500,n//2)
statistics = get_stats(Knapsack_2, 21, args)
pprint(statistics)
###Output
{'Best solution': {'f': -2310,
'x': array([0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1,
0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,
0, 1, 1, 0, 0, 1])},
'Mean': -2261.714285714286,
'Median': -2270.0,
'Standard deviation': 37.13186650579367,
'Worst solution': {'f': -2193,
'x': array([0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,
0, 1, 1, 0, 0, 1])}}
###Markdown
Travelling salesman problem\begin{equation} \label{eq:TSP} \begin{array}{rll} \text{minimize:} & f(x) = d(x_n, x_1) + \sum_{i=1}^{n-1} d(x_i, x_{i+1}) & \\ \text{such that: } & x_i \in \{1,2,\cdots,n\} & \\ \end{array}\end{equation}Where:* $d(x_i,x_j)$ is the distance from city $x_i$ to city $x_j$.* $n$ is the number of cities.* $x$ is a permutation of the $n$ cities.
###Code
import random
num_cities = 10
iterations = 100
dist_matrix = \
[\
[0,49,30,53,72,19,76,87,45,48],\
[49,0,19,38,32,31,75,69,61,25],\
[30,19,0,41,98,56,6,6,45,53],\
[53,38,41,0,52,29,46,90,23,98],\
[72,32,98,52,0,63,90,69,50,82],\
[19,31,56,29,63,0,60,88,41,95],\
[76,75,6,46,90,60,0,61,92,10],\
[87,69,6,90,69,88,61,0,82,73],\
[45,61,45,23,50,41,92,82,0,5],\
[48,25,53,98,82,95,10,73,5,0],\
]
def f_salesman(x : np.ndarray) -> float:
global dist_matrix
total_dist = 0
for i in range(1,len(x)):
u,v = x[i], x[i-1]
total_dist+= dist_matrix[u][v]
total_dist += dist_matrix[x[-1]][0]
return total_dist
def g_salesman(x : np.ndarray) -> bool:
"""
Xi in {1,2, ... , N}
"""
size = len(x)
size_ = len(np.unique(x))
return size == size_
###Output
_____no_output_____
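###Markdown
As a quick check of the objective (illustrative only): the identity tour $[0, 1, \ldots, 9]$ visits the cities in index order, and `f_salesman` adds the closing edge back to city $0$:
###Code
identity_tour = np.arange(num_cities)
print(f_salesman(identity_tour), g_salesman(identity_tour))
###Output
_____no_output_____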
###Markdown
In this example we show how to define our own tabu list for the travelling salesman problem so that it can be used in our `TabuSearch` search. The tabu list must provide the following methods:- `reset`- `update`- `push`- `find`
###Code
class Tabu_Salesman_list:
def __init__(self,timer):
self.__TB = {}
self.timer = timer
def reset(self,timer) -> None:
self.__TB = {}
self.timer = timer
def update(self) -> None:
to_pop = []
for key in self.__TB:
if self.__TB[key]-1 == 0:
to_pop.append(key)
else:
self.__TB[key]-=1
for key in to_pop:
self.__TB.pop(key)
@checkargs
#x has [p,v,step], we are only interested in v (value)
def push(self, x : list ) -> None:
self.__TB[x[1]] = self.timer
@checkargs
def find(self, x : list) -> bool:
return x[1] in self.__TB
class TravellingSalesman_solver(TabuSearch):
def __init__(self, f_ : function_type , constraints_: list, TabuStorage):
super().__init__(f_,constraints_,TabuStorage)
@checkargs
def get_neighbors(self, x : np.ndarray,**kwargs) -> list:
neighbors_list = []
ind = random.randint(1,len(x)-1)
while self.TL.find([-1,x[ind]]):
ind = random.randint(1,len(x)-1)
v = x[ind]
x_tmp = list(x[v != x])
for i in range(1, len(x)):
if ind == i:
continue
neighbors_list += [ x_tmp[:i] + [v] + x_tmp[i:]]
return neighbors_list
@checkargs
def encode_change(self, neighbor : (list,np.ndarray), x : (list,np.ndarray),**kwargs) -> list: # return [position, value] of the move applied
x_p ={x[i] : i for i in range(len(x))}
n_p = {neighbor[i]: i for i in range(len(x))}
ind = -1
max_dist = -1
value = -1
for i in range(1, len(x)):
v = x[i]
dist = abs(x_p[v] - n_p[v])
if dist > max_dist:
ind = i
max_dist = dist
value = v
return [ind , value]
###Output
_____no_output_____
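###Markdown
Before plugging the tabu list into the search, we can exercise it on its own (illustrative only): an entry added with `push` stays tabu for `timer` calls to `update` and then expires:
###Code
tabu_demo = Tabu_Salesman_list(2)
tabu_demo.push([0, 7])           # the move encoding is [position, value]; value 7 becomes tabu
print(tabu_demo.find([-1, 7]))   # True: 7 is still tabu
tabu_demo.update()
tabu_demo.update()
print(tabu_demo.find([-1, 7]))   # False: the entry expired after two updates
###Output
_____no_output_____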
###Markdown
Initial solutionIn this case, we build the initial solution using a greedy strategy.
###Code
def getInitialSolutionTS(distance_matrix, total_cities):
Solution = [0]
remaining_cities = list(range(1,total_cities))
while len(remaining_cities) != 0:
from_ =Solution[-1]
to_ = remaining_cities[0]
dist = distance_matrix[from_][to_]
for i in range(1, len(remaining_cities)):
distance = distance_matrix[from_][remaining_cities[i]]
if distance < dist:
to_ = remaining_cities[i]
dist = distance
Solution.append(to_)
ind = remaining_cities.index(to_)
remaining_cities.pop(ind)
return Solution
TravellingSalesman = TravellingSalesman_solver(f_salesman,[g_salesman],Tabu_Salesman_list(num_cities//2))
init_path = np.array(getInitialSolutionTS(dist_matrix,num_cities))
print("Initialize search with this initial point {} \n f(x) = {}".format(init_path, f_salesman(init_path)))
TravellingSalesman.optimize(init_path, iterations, num_cities//2)
print(TravellingSalesman)
args = (init_path, iterations, num_cities//2)
statistics = get_stats(TravellingSalesman, 30, args)
pprint(statistics)
###Output
{'Best solution': {'f': 248, 'x': array([0, 5, 3, 8, 9, 6, 2, 7, 4, 1])},
'Mean': 248.0,
'Median': 248.0,
'Standard deviation': 0.0,
'Worst solution': {'f': 248, 'x': array([0, 5, 3, 8, 9, 6, 2, 7, 4, 1])}}
|
fig_split_mnist/Figure split MNIST.ipynb | ###Markdown
Parameters
###Code
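# Compatibility shim: tf.select was renamed to tf.where in TensorFlow 1.x,
# so use whichever attribute this TF build provides.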
select = tf.select if hasattr(tf, 'select') else tf.where
# Data params
input_dim = 784
output_dim = 10
# Network params
n_hidden_units = 256
activation_fn = tf.nn.relu
# Optimization params
batch_size = 64
epochs_per_task = 10
n_stats = 10
# Reset optimizer after each age
reset_optimizer = True
###Output
_____no_output_____
###Markdown
Construct datasets
###Code
# task_labels = [[0,1], [2,3]]  # smaller two-task variant
task_labels = [[0,1], [2,3], [4,5], [6,7], [8,9]]
# task_labels = [[0,1,2,3,4], [5,6,7,8,9]]
n_tasks = len(task_labels)
training_datasets = utils.construct_split_mnist(task_labels, split='train')
validation_datasets = utils.construct_split_mnist(task_labels, split='test')
# training_datasets = utils.mk_training_validation_splits(full_datasets, split_fractions=(0.9, 0.1))
###Output
_____no_output_____
###Markdown
Construct network, loss, and updates
###Code
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.InteractiveSession(config=config)
sess.run(tf.global_variables_initializer())
# tf.equal(output_mask[None, :], 1.0)
import keras.backend as K
import keras.activations as activations
output_mask = tf.Variable(tf.zeros(output_dim), name="mask", trainable=False)
def masked_softmax(logits):
# logits are [batch_size, output_dim]
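# Inactive outputs get a huge negative logit (-1e32), so after the softmax
# their probability is numerically zero; only the masked-in classes compete.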
x = select(tf.tile(tf.equal(output_mask[None, :], 1.0), [tf.shape(logits)[0], 1]), logits, -1e32 * tf.ones_like(logits))
return activations.softmax(x)
def set_active_outputs(labels):
new_mask = np.zeros(output_dim)
for l in labels:
new_mask[l] = 1.0
sess.run(output_mask.assign(new_mask))
print(sess.run(output_mask))
def masked_predict(model, data, targets):
pred = model.predict(data)
print(pred)
acc = np.argmax(pred,1)==np.argmax(targets,1)
return acc.mean()
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(n_hidden_units, activation=activation_fn, input_shape=(input_dim,)))
model.add(Dense(n_hidden_units, activation=activation_fn))
model.add(Dense(output_dim, kernel_initializer='zero', activation=masked_softmax))
from pathint import protocols
from pathint.optimizers import KOOptimizer
from keras.optimizers import Adam, RMSprop,SGD
from keras.callbacks import Callback
from pathint.keras_utils import LossHistory
#protocol_name, protocol = protocols.PATH_INT_PROTOCOL(omega_decay='sum',xi=1e-3)
protocol_name, protocol = protocols.PATH_INT_PROTOCOL(omega_decay='sum',xi=1e-3)
# protocol_name, protocol = protocols.SUM_FISHER_PROTOCOL('sum')
opt = Adam(lr=1e-3, beta_1=0.9, beta_2=0.999)
# opt = SGD(1e-3)
# opt = RMSprop(lr=1e-3)
oopt = KOOptimizer(opt, model=model, **protocol)
model.compile(loss='categorical_crossentropy', optimizer=oopt, metrics=['accuracy'])
model._make_train_function()
saved_weights = model.get_weights()
history = LossHistory()
callbacks = [history]
datafile_name = "split_mnist_data_%s.pkl.gz"%protocol_name
###Output
_____no_output_____
###Markdown
Train!
###Code
# diag_vals = dict()
# all_evals = dict()
# data = utils.load_zipped_pickle("comparison_data_%s.pkl.gz"%protocol_name)
# returns empty dict if file not found
def run_fits(cvals, training_data, valid_data, eval_on_train_set=False, nstats=1):
acc_mean = dict()
acc_std = dict()
for cidx, cval_ in enumerate(tqdm(cvals)):
runs = []
for runid in range(nstats):
sess.run(tf.global_variables_initializer())
# model.set_weights(saved_weights)
cstuffs = []
evals = []
print("setting cval")
cval = cval_
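# cval is the consolidation strength c: it scales the quadratic penalty that
# protects parameters deemed important for earlier tasks (c=0 corresponds to
# plain fine-tuning with the base optimizer).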
oopt.set_strength(cval)
oopt.init_task_vars()
print("cval is", sess.run(oopt.lam))
for age, tidx in enumerate(range(n_tasks)):
print("Age %i, cval is=%f"%(age,cval))
print("settint output mask")
set_active_outputs(task_labels[age])
stuffs = model.fit(training_data[tidx][0], training_data[tidx][1], batch_size, epochs_per_task, callbacks=callbacks)
oopt.update_task_metrics(training_data[tidx][0], training_data[tidx][1], batch_size)
oopt.update_task_vars()
ftask = []
for j in range(n_tasks):
set_active_outputs(task_labels[j])
if eval_on_train_set:
f_ = masked_predict(model, training_data[j][0], training_data[j][1])
else:
f_ = masked_predict(model, valid_data[j][0], valid_data[j][1])
ftask.append(np.mean(f_))
evals.append(ftask)
cstuffs.append(stuffs)
# Re-initialize optimizer variables
if reset_optimizer:
oopt.reset_optimizer()
evals = np.array(evals)
runs.append(evals)
runs = np.array(runs)
acc_mean[cval_] = runs.mean(0)
acc_std[cval_] = runs.std(0)
return dict(mean=acc_mean, std=acc_std)
# cvals = np.concatenate(([0], np.logspace(-2, 2, 10)))
# cvals = np.concatenate(([0], np.logspace(-1, 2, 2)))
# cvals = np.concatenate(([0], np.logspace(-2, 0, 3)))
cvals = np.logspace(-3, 3, 7)#[0, 1.0, 2, 5, 10]
cvals = [0, 1.0]
print(cvals)
%%capture
recompute_data = True
if recompute_data:
data = run_fits(cvals, training_datasets, validation_datasets, eval_on_train_set=True, nstats=n_stats)
utils.save_zipped_pickle(data, datafile_name)
data = utils.load_zipped_pickle(datafile_name)
print(cvals)
cmap = plt.get_cmap('cool')
cNorm = colors.Normalize(vmin=-5, vmax=np.log(np.max(list(data['mean'].keys()))))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)
print(scalarMap.get_clim())
figure(figsize=(14, 2.5))
axs = [subplot(1,n_tasks+1,1)]#, None, None]
for i in range(1, n_tasks + 1):
axs.append(subplot(1, n_tasks+1, i+1, sharex=axs[0], sharey=axs[0]))
keys = list(data['mean'].keys())
sorted_keys = np.sort(keys)
for cval in sorted_keys:
mean_vals = data['mean'][cval]
std_vals = data['std'][cval]
for j in range(n_tasks):
colorVal = scalarMap.to_rgba(np.log(cval))
# axs[j].plot(evals[:, j], c=colorVal)
axs[j].errorbar(range(n_tasks), mean_vals[:, j], yerr=std_vals[:, j]/np.sqrt(n_stats), c=colorVal)
label = "c=%g"%cval
average = mean_vals.mean(1)
axs[-1].plot(average, c=colorVal, label=label)
for i, ax in enumerate(axs):
ax.legend(loc='best')
ax.set_title((['task %d'%j for j in range(n_tasks)] + ['average'])[i])
gcf().tight_layout()
sns.despine()
plt.rc('text', usetex=False)
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
plt.rc('axes', labelsize=8)
def simple_axis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
fig = plt.figure(figsize=(3.3,2.5))
ax = plt.subplot(111)
for cval in sorted_keys:
mean_stuff = []
std_stuff = []
for i in range(len(data['mean'][cval])):
mean_stuff.append(data['mean'][cval][i][:i+1].mean())
std_stuff.append(np.sqrt((data['std'][cval][i][:i+1]**2).sum())/(n_stats*np.sqrt(n_stats)))
# plot(range(1,n_tasks+1), mean_stuff, 'o-', label="c=%g"%cval)
errorbar(range(1,n_tasks+1), mean_stuff, yerr=std_stuff, fmt='o-', label="c=%g"%cval)
axhline(data['mean'][cval][0][0], linestyle='--', color='k')
xlabel('Number of tasks')
ylabel('Fraction correct')
legend(loc='best')
xlim(0.5, 5.5)
ylim(0.7, 1.02)
# grid('on')
# sns.despine()
simple_axis(ax)
fig = plt.figure(figsize=(3.3,2.0))
ax = plt.subplot(111)
plot_keys =sorted(data['mean'].keys())# [0,1]
for cval in plot_keys:
mean_stuff = []
std_stuff = []
for i in range(len(data['mean'][cval])):
mean_stuff.append(data['mean'][cval][i][:i+1].mean())
std_stuff.append(np.sqrt((data['std'][cval][i][:i+1]**2).sum())/(n_stats*np.sqrt(n_stats)))
# plot(range(1,n_tasks+1), mean_stuff, 'o-', label="c=%g"%cval)
errorbar(range(1,n_tasks+1), mean_stuff, yerr=std_stuff, fmt='o-', label="c=%g"%cval)
axhline(data['mean'][cval][0][0], linestyle=':', color='k')
xlabel('Number of tasks')
ylabel('Fraction correct')
legend(loc='best', fontsize=8)
xlim(0.5, 5.5)
plt.yticks([0.6,0.8,1.0])
ylim(0.6, 1.02)
# grid('on')
# sns.despine()
simple_axis(ax)
plt.subplots_adjust(left=.15, bottom=.18, right=.99, top=.97)
plt.savefig("split_mnist_accuracy.pdf")
figure(figsize=(7, 1.8))
axs = [subplot(1,n_tasks+1,1)]
for i in range(1,n_tasks+1):
axs.append(subplot(1, n_tasks+1, i+1, sharex=axs[0], sharey=axs[0]))
fmts = ['o', 's']
plot_keys =sorted(data['mean'].keys())
# plot_keys = [0]
print(plot_keys)
for i, cval in enumerate(plot_keys):
label = "c=%g"%cval
mean_vals = data['mean'][cval]
std_vals = data['std'][cval]
for j in range(n_tasks+1):
sca(axs[j])
errorbar_kwargs = dict(fmt="%s-"%fmts[i], markersize=5)
if j < n_tasks:
# print(j,mean_vals[:, j])
norm= np.sqrt(n_stats) # np.sqrt(n_stats) for SEM or 1 for STDEV
axs[j].errorbar(np.arange(n_tasks)+1, mean_vals[:, j], yerr=std_vals[:, j]/norm, label=label, **errorbar_kwargs)
else:
mean_stuff = []
std_stuff = []
for i in range(len(data['mean'][cval])):
mean_stuff.append(data['mean'][cval][i][:i+1].mean())
#std_stuff.append(data['mean'][cval][i][:i+1].std()/np.sqrt(n_stats))
std_stuff.append(np.sqrt((data['std'][cval][i][:i+1]**2).sum())/(n_stats*np.sqrt(n_stats)))
# plot(range(1,n_tasks+1), mean_stuff, 'o-', label="c=%g"%cval)
errorbar(range(1,n_tasks+1), mean_stuff, yerr=std_stuff, label="c=%g"%cval, **errorbar_kwargs)
plt.xticks(np.arange(5)+1)
plt.xlim((1.0,5.5))
if j == 0:
axs[j].set_yticks([0.5,1])
else:
setp(axs[j].get_yticklabels(), visible=False)
plt.ylim((0.45,1.1))
for i, ax in enumerate(axs):
if i < n_tasks:
ax.set_title((['Task %d (%d or %d)'%(j+1,task_labels[j][0], task_labels[j][1]) for j in range(n_tasks)] + ['average'])[i], fontsize=8)
else:
ax.set_title("Average", fontsize=8)
#ax.set_title((['Task %d'%(j+1) for j in xrange(n_tasks)] + ['average'])[i], fontsize=8)
# ax.axhline(0.5, linestyle=':', color='k')
ax.axhline(0.5, color='k', linestyle=':', label="chance", zorder=0)
handles, labels = axs[-1].get_legend_handles_labels()
# Reorder legend so chance is last
axs[-1].legend([handles[j] for j in [1,2,0]], [labels[j] for j in [1,2,0]], loc='lower right', fontsize=8, bbox_to_anchor=(-1.3, -.7), ncol=3, frameon=True)
# axs[-1].legend(loc='lower right', fontsize=8, bbox_to_anchor=(-1.3, -.7), ncol=3, frameon=True)
axs[0].set_xlabel("Tasks")
axs[0].set_ylabel("Accuracy")
gcf().tight_layout()
sns.despine()
plt.savefig("split_mnist_tasks.pdf")
###Output
[0, 1.0]
|
Solution/Exercise_03.ipynb | ###Markdown
CUDA Exercise 03> Vector dot product (inner product) example on the GPU, using only a single thread. This Jupyter Notebook can also be opened in Google Colab, so you don't have to buy a PC with a graphics card to play with CUDA. To launch it in Google Colab, please click the icon below.[](https://colab.research.google.com/github/SuperChange001/CUDA_Learning/blob/main/Solution/Exercise_03.ipynb) Initialize the CUDA dev environment
###Code
# Install the nvcc plugin for Jupyter (pip clones the repo below)
!pip install git+git://github.com/depctg/nvcc4jupyter.git
%load_ext nvcc_plugin
# Check the environment
!lsb_release -a
!nvcc --version
!nvidia-smi
###Output
Collecting git+git://github.com/depctg/nvcc4jupyter.git
Cloning git://github.com/depctg/nvcc4jupyter.git to /tmp/pip-req-build-dcn3mih6
Running command git clone -q git://github.com/depctg/nvcc4jupyter.git /tmp/pip-req-build-dcn3mih6
Building wheels for collected packages: NVCCPlugin
Building wheel for NVCCPlugin (setup.py) ... [?25l[?25hdone
Created wheel for NVCCPlugin: filename=NVCCPlugin-0.0.2-cp37-none-any.whl size=4334 sha256=502f57f1df304061f8b68db3c23567f7917f40794f6bdf2e09e21eef86af5570
Stored in directory: /tmp/pip-ephem-wheel-cache-mk6amdyq/wheels/1e/43/2d/099cad2b9b02dfa88573f50a22735d8a0b2ba69bf82167b81c
Successfully built NVCCPlugin
Installing collected packages: NVCCPlugin
Successfully installed NVCCPlugin-0.0.2
Default out bin result.out
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 18.04.5 LTS
Release: 18.04
Codename: bionic
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2020 NVIDIA Corporation
Built on Wed_Jul_22_19:09:09_PDT_2020
Cuda compilation tools, release 11.0, V11.0.221
Build cuda_11.0_bu.TC445_37.28845127_0
Thu Apr 22 21:12:57 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 465.19.01 Driver Version: 460.32.03 CUDA Version: 11.2 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |
| N/A 41C P8 9W / 70W | 0MiB / 15109MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
###Markdown
Vector Dot Production
###Code
%%cu
#include <stdio.h>
#include <assert.h>
#define VECTOR_LENGTH 10
#define MAX_ERR 1e-5
__global__ void vector_dot_product(float *out, float *a, float *b, int n)
{
float sum=0;
for(int i = 0; i < n; i++)
{
sum = sum + a[i] * b[i];
}
*out = sum;
}
void test_vector_dot_product(void)
{
float *a, *b, *out;
float *d_a, *d_b, *d_out;
// Allocate memory on CPU
a = (float*)malloc(sizeof(float) * VECTOR_LENGTH);
b = (float*)malloc(sizeof(float) * VECTOR_LENGTH);
out = (float*)malloc(sizeof(float));
// data initializtion
for(int i = 0; i < VECTOR_LENGTH; i++)
{
a[i] = 3.14f;
b[i] = 2.0f;
}
// Allocate memory on GPU
cudaMalloc((void**)&d_a, sizeof(float) * VECTOR_LENGTH);
cudaMalloc((void**)&d_b, sizeof(float) * VECTOR_LENGTH);
cudaMalloc((void**)&d_out, sizeof(float));
// copy operator to GPU
cudaMemcpy(d_a, a, sizeof(float) * VECTOR_LENGTH, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(float) * VECTOR_LENGTH, cudaMemcpyHostToDevice);
// GPU do the work, CPU waits
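// <<<1,1>>> launches a single block containing a single thread, so the
// whole dot product runs serially on the GPU, as described above.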
vector_dot_product<<<1,1>>>(d_out, d_a, d_b, VECTOR_LENGTH);
// Get results from the GPU
cudaMemcpy(out, d_out, sizeof(float),
cudaMemcpyDeviceToHost);
// Test the result
assert(fabs(*out - 20*3.14) < MAX_ERR);
printf("out[0] = %f\n", out[0]);
printf("PASSED\n");
// Free the memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_out);
free(a);
free(b);
free(out);
}
int main()
{
test_vector_dot_product();
}
###Output
_____no_output_____ |
courses/machine_learning/deepdive2/how_google_does_ml/labs/inclusive_ml.ipynb | ###Markdown
Inclusive ML - Understanding Bias**Learning Objectives**In this lab, you use a Jupyter Notebook to:- Invoke the What-if Tool against a deployed model- Explore attributes of the dataset- Examine aspects of bias in model results- Evaluate how the What-if Tool provides suggestions to remediate bias--- Introduction This notebook shows use of the [What-If Tool](https://pair-code.github.io/what-if-tool) inside of a Jupyter notebook. The What-If Tool, among many other things, allows us to explore the impacts of fairness in model design and deployment.The notebook invokes a deployed XGBoost classifier model on the [UCI census dataset](https://archive.ics.uci.edu/ml/datasets/census+income), which predicts whether a person earns more than $50K based on their census information.You will then visualize the results of the trained classifier on test data using the What-If Tool. ___ Set up the notebook environmentFirst you must perform a few environment and project configuration steps. __VERY IMPORTANT__: In the cell below you must replace the text 'QWIKLABSPROJECT' with your Qwiklabs Project Name as provided during the setup of your environment. Please leave any surrounding single quotes in place.These steps may take 8 to 10 minutes; please wait until you see the following response before proceeding: "__Creating version (this might take a few minutes)......done.__"
###Code
# Lab Setup
# TODO Replace the name QWIKLABSPROJECT with your Qwiklabs Project Name, leave single quotes in place
GCP_PROJECT = 'QWIKLABSPROJECT' #TODO
!gsutil mb -p $GCP_PROJECT gs://$GCP_PROJECT
MODEL_BUCKET = 'gs://QWIKLABSPROJECT' #TODO
!gsutil cp gs://cloud-training-demos/mlfairness/model.bst $MODEL_BUCKET
!gcloud config set project $GCP_PROJECT
!gcloud ai-platform models create model
!gcloud ai-platform versions create 'v1' \
--model=model \
--framework='XGBOOST' \
--runtime-version=1.14 \
--origin=$MODEL_BUCKET \
--python-version=3.5 \
--project=$GCP_PROJECT
print("Cell Successfully Complete")
###Output
_____no_output_____
###Markdown
---Next import the libraries needed for the lab.
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import witwidget
from witwidget.notebook.visualization import WitWidget, WitConfigBuilder
print("Cell Successfully Complete")
###Output
_____no_output_____
###Markdown
---Finally download the data and arrays needed to use the What-if Tool.
###Code
!gsutil cp gs://cloud-training-demos/mlfairness/income.pkl .
!gsutil cp gs://cloud-training-demos/mlfairness/x_test.npy .
!gsutil cp gs://cloud-training-demos/mlfairness/y_test.npy .
features = pd.read_pickle('income.pkl')
x_test = np.load('x_test.npy')
y_test = np.load('y_test.npy')
print("Cell Successfully Complete")
###Output
_____no_output_____
###Markdown
---Now take a quick look at the data. The ML model type used for this analysis is XGBoost. XGBoost is a machine learning framework that uses decision trees and gradient boosting to build predictive models. It works by ensembling multiple decision trees together based on the score associated with different leaf nodes in a tree. XGBoost requires all values to be numeric, so the original dataset was modified slightly. The biggest change was to assign a numeric value to Sex. The original dataset only had the values "Female" and "Male" for Sex, and the decision was made to assign the value "1" to Female and "2" to Male. As part of the data preparation effort, the Pandas function "get_dummies" was used to convert the remaining categorical values into numerical equivalents. For instance, the "Education" column was turned into several sub-columns named after the values in the column: the "Education_HS-grad" column has a value of "1" when that was the original categorical value and a value of "0" for other categories.
###Code
features.head()
###Output
_____no_output_____
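###Markdown
For illustration, this is what `pd.get_dummies` does to a small categorical column (toy data, not the census set):
###Code
pd.get_dummies(pd.DataFrame({'Education': ['HS-grad', 'Bachelors', 'HS-grad']}))
###Output
_____no_output_____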
###Markdown
---To connect the What-if Tool to an AI Platform model, you need to pass it a subset of your test examples. The command below creates a Numpy array from 2000 of our test examples.
###Code
# Combine the features and labels into one array for the What-if Tool
num_wit_examples = 2000
test_examples = np.hstack((x_test[:num_wit_examples],y_test[:num_wit_examples].reshape(-1,1)))
print("Cell Successfully Complete")
###Output
_____no_output_____
###Markdown
---Instantiating the What-if Tool is as simple as creating a WitConfigBuilder object and passing it the AI Platform model to be analyzed.The optional "adjust_prediction" parameter is used because the What-if Tool expects a list of scores for each class in our model (in this case 2). Since the model only returns a single value from 0 to 1, this function transforms it into the correct format. Lastly, the name 'income_prediction' is used as the ground truth label.It may take 1 to 2 minutes for the What-if Tool to load and render the visualization palette, so please be patient.
###Code
def adjust_prediction(pred):
return [1 - pred, pred]
config_builder = (WitConfigBuilder(test_examples.tolist(), features.columns.tolist() + ['income_prediction'])
.set_ai_platform_model('QWIKLABSPROJECT', 'model', 'v1', adjust_prediction=adjust_prediction)
.set_target_feature('income_prediction')
.set_label_vocab(['low', 'high']))
WitWidget(config_builder, height=800)
###Output
_____no_output_____ |
_posts/python-v3/ipython-notebooks/survival_analysis.ipynb | ###Markdown
In this notebook, we introduce survival analysis and we show application examples using both R and Python. We will compare the two programming languages, and leverage Plotly's Python and R APIs to convert static graphics into interactive `plotly` objects.[Plotly](https://plotly.com) is a platform for making interactive graphs with R, Python, MATLAB, and Excel. You can make graphs and analyze data on Plotly’s free public cloud. For collaboration and sensitive data, you can run Plotly [on your own servers](https://plotly.com/product/enterprise/).For a more in-depth theoretical background in survival analysis, please refer to these sources:- [Lecture Notes by John Fox](http://socserv.mcmaster.ca/jfox/Courses/soc761/survival-analysis.pdf)- [Wikipedia article](http://en.wikipedia.org/wiki/Survival_analysis)- [Presentation by Kristin Sainani](www.pitt.edu/~super4/33011-34001/33051-33061.ppt)- [Lecture Notes by Germán Rodríguez](http://data.princeton.edu/wws509/notes/c7.pdf)Need help converting Plotly graphs from R or Python?- [R](https://plotly.com/r/user-guide/)- [Python](https://plotly.com/python/matplotlib-to-plotly-tutorial/)For this code to run on your machine, you will need several R and Python packages installed.- Running `sudo pip install ` from your terminal will install a Python package.- Running `install.packages("")` in your R console will install an R package.You will also need to create an account with [Plotly](https://plotly.com/feed/) to receive your API key.
###Code
# You can also install packages from within IPython!
# Install Python Packages
!pip install lifelines
!pip install rpy2
!pip install plotly
!pip install pandas
# Load extension that let us use magic function `%R`
%load_ext rpy2.ipython
# Install R packages
%R install.packages("devtools")
%R devtools::install_github("plotly/plotly.R")
%R install.packages("OIsurv")
###Output
_____no_output_____
###Markdown
Introduction [Survival analysis](http://en.wikipedia.org/wiki/Survival_analysis) is a set of statistical methods for analyzing the occurrence of events over time. It is also used to determine the relationship of co-variates to the time-to-events, and accurately compare time-to-event between two or more groups. For example:- Time to death in biological systems.- Failure time in mechanical systems.- How long can we expect a user to be on a website / service?- Time to recovery for lung cancer treatment.The statistical term 'survival analysis' is analogous to 'reliability theory' in engineering, 'duration analysis' in economics, and 'event history analysis' in sociology. The two key functions in survival analysis are the *survival function* and the *hazard function*.The **survival function**, conventionally denoted by $S$, is the probability that the event (say, death) has not occurred yet:$$S(t) = Pr(T > t),$$where $T$ denotes the time of death and $Pr$ the probability. Since $S$ is a probability, $0\leq S(t)\leq1$. Survival times are non-negative ($T \geq 0$) and, generally, $S(0) = 1$.The **hazard function** $h(t)$ is the event (death) rate at time $t$, conditional on survival until $t$ (i.e., $T \geq t$):\begin{align*}h(t) &= \lim_{\Delta t \to 0} \frac{Pr(t \leq T \leq t + \Delta t \, | \, T \geq t)}{\Delta t} \\ &= \lim_{\Delta t \to 0} \frac{Pr(t \leq T \leq t + \Delta t)}{\Delta t \, S(t)} = \frac{p(t)}{S(t)},\end{align*}where $p$ denotes the probability density function.In practice, we do not get to observe the actual survival function of a population; we must use the observed data to estimate it. A popular estimate for the survival function $S(t)$ is the [Kaplan–Meier estimate](http://en.wikipedia.org/wiki/Kaplan–Meier_estimator):\begin{align*}\hat{S}(t) &= \prod_{t_i \leq t} \frac{n_i − d_i}{n_i}\,,\end{align*}where $d_i$ is the number of events (deaths) observed at time $t_i$ and $n_i$ is the number of subjects at risk observed at time $t_i$. Censoring Censoring is a type of missing data problem common in survival analysis. Other popular comparison methods, such as linear regression and t-tests, do not accommodate censoring. This makes survival analysis attractive for data from randomized clinical studies. In an ideal scenario, both the birth and death times of a patient are known, which means the lifetime is known.**Right censoring** occurs when the 'death' is unknown, but it is after some known date. e.g. The 'death' occurs after the end of the study, or there was no follow-up with the patient.**Left censoring** occurs when the lifetime is known to be less than a certain duration. e.g. Unknown time of initial infection exposure when first meeting with a patient. For the following analysis, we will use the [lifelines](https://github.com/CamDavidsonPilon/lifelines) library for python, and the [survival](http://cran.r-project.org/web/packages/survival/survival.pdf) package for R. We can use [rpy2](http://rpy.sourceforge.net) to execute R code in the same document as the python code.
###Code
# OIserve contains the survival package and sample datasets
%R library(OIsurv)
%R library(devtools)
%R library(plotly)
%R library(IRdisplay)
# Authenticate to plotly's api using your account
%R py <- plotly("rmdk", "0sn825k4r8")
# Load python libraries
import numpy as np
import pandas as pd
import lifelines as ll
# Plotting helpers
from IPython.display import HTML
%matplotlib inline
import matplotlib.pyplot as plt
import plotly.plotly as py
import plotly.tools as tls
from plotly.graph_objs import *
from pylab import rcParams
rcParams['figure.figsize']=10, 5
###Output
_____no_output_____
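###Markdown
Before loading the data, here is a tiny hand-rolled version of the Kaplan–Meier product defined above (toy numbers, for intuition only):
###Code
km_d = np.array([1.0, 2.0, 1.0])           # deaths d_i at the distinct event times
km_n = np.array([10.0, 8.0, 5.0])          # subjects at risk n_i at those times
km_S = np.cumprod((km_n - km_d) / km_n)    # S(t_i) = product over t_i of (n_i - d_i) / n_i
print(km_S)
###Output
_____no_output_____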
###Markdown
Loading data into Python and RWe will be using the `tongue` dataset from the `KMsurv` package in R, then convert the data into a pandas dataframe under the same name.This data frame contains the following columns:- type: Tumor DNA profile (1=Aneuploid Tumor, 2=Diploid Tumor) - time: Time to death or on-study time, weeks- delta Death indicator (0=alive, 1=dead)
###Code
# Load in data
%R data(tongue)
# Pull data into python kernel
%Rpull tongue
# Convert into pandas dataframe
from rpy2.robjects import pandas2ri
tongue = pandas2ri.ri2py_dataframe(tongue)
###Output
_____no_output_____
###Markdown
We can now refer to `tongue` using both R and python.
###Code
%%R
summary(tongue)
tongue.describe()
###Output
_____no_output_____
###Markdown
We can even operate on R and Python within the same code cell.
###Code
%R print(mean(tongue$time))
print(tongue['time'].mean())
###Output
_____no_output_____
###Markdown
In R we need to create a `Surv` object with the `Surv()` function. Most functions in the `survival` package apply methods to this object. For right-censored data, we need to pass two arguments to `Surv()`:1. a vector of times2. a vector indicating which times are observed and censored
###Code
%%R
attach(tongue)
tongue.surv <- Surv(time[type==1], delta[type==1])
tongue.surv
###Output
_____no_output_____
###Markdown
- The plus-signs identify observations that are right-censored. Estimating survival with Kaplan-Meier Using R The simplest fit estimates a survival object against an intercept. However, the `survfit()` function has several optional arguments. For example, we can change the confidence interval using `conf.int` and `conf.type`. See `help(survfit.formula)` for the comprehensive documentation.
###Code
%%R
surv.fit <- survfit(tongue.surv~1)
surv.fit
###Output
_____no_output_____
###Markdown
It is often helpful to call the `summary()` and `plot()` functions on this object.
###Code
%%R
summary(surv.fit)
%%R -h 400
plot(surv.fit, main='Kaplan-Meier estimate with 95% confidence bounds',
xlab='time', ylab='survival function')
###Output
_____no_output_____
###Markdown
Let's convert this plot into an interactive plotly object using [plotly](https://plotly.com) and [ggplot2](http://ggplot2.org). First, we will use a helper ggplot function written by [Edwin Thoen](http://www.r-statistics.com/2013/07/creating-good-looking-survival-curves-the-ggsurv-function/) to plot pretty survival distributions in R.
###Code
%%R
ggsurv <- function(s, CI = 'def', plot.cens = T, surv.col = 'gg.def',
cens.col = 'red', lty.est = 1, lty.ci = 2,
cens.shape = 3, back.white = F, xlab = 'Time',
ylab = 'Survival', main = ''){
library(ggplot2)
strata <- ifelse(is.null(s$strata) ==T, 1, length(s$strata))
stopifnot(length(surv.col) == 1 | length(surv.col) == strata)
stopifnot(length(lty.est) == 1 | length(lty.est) == strata)
ggsurv.s <- function(s, CI = 'def', plot.cens = T, surv.col = 'gg.def',
cens.col = 'red', lty.est = 1, lty.ci = 2,
cens.shape = 3, back.white = F, xlab = 'Time',
ylab = 'Survival', main = ''){
dat <- data.frame(time = c(0, s$time),
surv = c(1, s$surv),
up = c(1, s$upper),
low = c(1, s$lower),
cens = c(0, s$n.censor))
dat.cens <- subset(dat, cens != 0)
col <- ifelse(surv.col == 'gg.def', 'black', surv.col)
pl <- ggplot(dat, aes(x = time, y = surv)) +
xlab(xlab) + ylab(ylab) + ggtitle(main) +
geom_step(col = col, lty = lty.est)
pl <- if(CI == T | CI == 'def') {
pl + geom_step(aes(y = up), color = col, lty = lty.ci) +
geom_step(aes(y = low), color = col, lty = lty.ci)
} else (pl)
pl <- if(plot.cens == T & length(dat.cens) > 0){
pl + geom_point(data = dat.cens, aes(y = surv), shape = cens.shape,
col = cens.col)
} else if (plot.cens == T & length(dat.cens) == 0){
stop ('There are no censored observations')
} else(pl)
pl <- if(back.white == T) {pl + theme_bw()
} else (pl)
pl
}
ggsurv.m <- function(s, CI = 'def', plot.cens = T, surv.col = 'gg.def',
cens.col = 'red', lty.est = 1, lty.ci = 2,
cens.shape = 3, back.white = F, xlab = 'Time',
ylab = 'Survival', main = '') {
n <- s$strata
groups <- factor(unlist(strsplit(names
(s$strata), '='))[seq(2, 2*strata, by = 2)])
gr.name <- unlist(strsplit(names(s$strata), '='))[1]
gr.df <- vector('list', strata)
ind <- vector('list', strata)
n.ind <- c(0,n); n.ind <- cumsum(n.ind)
for(i in 1:strata) ind[[i]] <- (n.ind[i]+1):n.ind[i+1]
for(i in 1:strata){
gr.df[[i]] <- data.frame(
time = c(0, s$time[ ind[[i]] ]),
surv = c(1, s$surv[ ind[[i]] ]),
up = c(1, s$upper[ ind[[i]] ]),
low = c(1, s$lower[ ind[[i]] ]),
cens = c(0, s$n.censor[ ind[[i]] ]),
group = rep(groups[i], n[i] + 1))
}
dat <- do.call(rbind, gr.df)
dat.cens <- subset(dat, cens != 0)
pl <- ggplot(dat, aes(x = time, y = surv, group = group)) +
xlab(xlab) + ylab(ylab) + ggtitle(main) +
geom_step(aes(col = group, lty = group))
col <- if(length(surv.col == 1)){
scale_colour_manual(name = gr.name, values = rep(surv.col, strata))
} else{
scale_colour_manual(name = gr.name, values = surv.col)
}
pl <- if(surv.col[1] != 'gg.def'){
pl + col
} else {pl + scale_colour_discrete(name = gr.name)}
line <- if(length(lty.est) == 1){
scale_linetype_manual(name = gr.name, values = rep(lty.est, strata))
} else {scale_linetype_manual(name = gr.name, values = lty.est)}
pl <- pl + line
pl <- if(CI == T) {
if(length(surv.col) > 1 && length(lty.est) > 1){
stop('Either surv.col or lty.est should be of length 1 in order
to plot 95% CI with multiple strata')
}else if((length(surv.col) > 1 | surv.col == 'gg.def')[1]){
pl + geom_step(aes(y = up, color = group), lty = lty.ci) +
geom_step(aes(y = low, color = group), lty = lty.ci)
} else{pl + geom_step(aes(y = up, lty = group), col = surv.col) +
geom_step(aes(y = low,lty = group), col = surv.col)}
} else {pl}
pl <- if(plot.cens == T & length(dat.cens) > 0){
pl + geom_point(data = dat.cens, aes(y = surv), shape = cens.shape,
col = cens.col)
} else if (plot.cens == T & length(dat.cens) == 0){
stop ('There are no censored observations')
} else(pl)
pl <- if(back.white == T) {pl + theme_bw()
} else (pl)
pl
}
pl <- if(strata == 1) {ggsurv.s(s, CI , plot.cens, surv.col ,
cens.col, lty.est, lty.ci,
cens.shape, back.white, xlab,
ylab, main)
} else {ggsurv.m(s, CI, plot.cens, surv.col ,
cens.col, lty.est, lty.ci,
cens.shape, back.white, xlab,
ylab, main)}
pl
}
###Output
_____no_output_____
###Markdown
Voila!
###Code
%%R -h 400
p <- ggsurv(surv.fit) + theme_bw()
p
###Output
_____no_output_____
###Markdown
We have to use a workaround to render an interactive plotly object by using an iframe in the ipython kernel. This is a bit easier if you are working in an R kernel.
###Code
%%R
# Create the iframe HTML
plot.ly <- function(url) {
# Set width and height from options or default square
w <- "750"
h <- "600"
html <- paste("<center><iframe height=\"", h, "\" id=\"igraph\" scrolling=\"no\" seamless=\"seamless\"\n\t\t\t\tsrc=\"",
url, "\" width=\"", w, "\" frameBorder=\"0\"></iframe></center>", sep="")
return(html)
}
%R p <- plot.ly("https://plotly.com/~rmdk/111/survival-vs-time/")
# pass object to python kernel
%R -o p
# Render HTML
HTML(p[0])
###Output
_____no_output_____
###Markdown
The `y axis` represents the probability a patient is still alive at time $t$ weeks. We see a steep drop off within the first 100 weeks, and then observe the curve flattening. The dotted lines represent the 95% confidence intervals. Using Python We will now replicate the above steps using python. Above, we have already specified a variable `tongues` that holds the data in a pandas dataframe.
###Code
from lifelines.estimation import KaplanMeierFitter
kmf = KaplanMeierFitter()
###Output
_____no_output_____
###Markdown
The method takes the same parameters as its R counterpart, a time vector and a vector indicating which times are observed and censored. The model fitting sequence is similar to the [scikit-learn](http://scikit-learn.org/stable/) API.
###Code
f = tongue.type==1
T = tongue[f]['time']
C = tongue[f]['delta']
kmf.fit(T, event_observed=C)
###Output
_____no_output_____
###Markdown
To get a plot with the confidence intervals, we simply can call `plot()` on our `kmf` object.
###Code
kmf.plot(title='Tumor DNA Profile 1')
###Output
_____no_output_____
###Markdown
Now we can convert this plot to an interactive [Plotly](https://plotly.com) object. However, we will have to augment the legend and filled area manually. Once we create a helper function, the process is simple.Please see the Plotly Python [user guide](https://plotly.com/python/overview/in-%5B37%5D) for more insight on how to update plot parameters. > Don't forget you can also easily edit the chart properties using the Plotly GUI interface by clicking the "Play with this data!" link below the chart.
###Code
p = kmf.plot(ci_force_lines=True, title='Tumor DNA Profile 1 (95% CI)')
# Collect the plot object
kmf1 = plt.gcf()
def pyplot(fig, ci=True, legend=True):
# Convert mpl fig obj to plotly fig obj, resize to plotly's default
py_fig = tls.mpl_to_plotly(fig, resize=True)
# Add fill property to lower limit line
if ci == True:
style1 = dict(fill='tonexty')
# apply style
py_fig['data'][2].update(style1)
# Change color scheme to black
py_fig['data'].update(dict(line=Line(color='black')))
# change the default line type to 'step'
py_fig['data'].update(dict(line=Line(shape='hv')))
# Delete misplaced legend annotations
py_fig['layout'].pop('annotations', None)
if legend == True:
# Add legend, place it at the top right corner of the plot
py_fig['layout'].update(
showlegend=True,
legend=Legend(
x=1.05,
y=1
)
)
# Send updated figure object to Plotly, show result in notebook
return py.iplot(py_fig)
pyplot(kmf1, legend=False)
###Output
_____no_output_____
###Markdown
Multiple Types Using R Many times there are different groups contained in a single dataset. These may represent categories such as treatment groups, different species, or different manufacturing techniques. The `type` variable in the `tongue` dataset describes a patient's DNA profile. Below we define a Kaplan-Meier estimate for each of these groups in R and Python.
###Code
%%R
surv.fit2 <- survfit( Surv(time, delta) ~ type)
p <- ggsurv(surv.fit2) +
ggtitle('Lifespans of different tumor DNA profile') + theme_bw()
p
###Output
_____no_output_____
###Markdown
Convert to a Plotly object.
###Code
#%R ggplotly(plt)
%R p <- plot.ly("https://plotly.com/~rmdk/173/lifespans-of-different-tumor-dna-profile/")
# pass object to python kernel
%R -o p
# Render HTML
HTML(p[0])
###Output
_____no_output_____
###Markdown
Using Python
###Code
f2 = tongue.type==2
T2 = tongue[f2]['time']
C2 = tongue[f2]['delta']
ax = plt.subplot(111)
kmf.fit(T, event_observed=C, label=['Type 1 DNA'])
kmf.survival_function_.plot(ax=ax)
kmf.fit(T2, event_observed=C2, label=['Type 2 DNA'])
kmf.survival_function_.plot(ax=ax)
plt.title('Lifespans of different tumor DNA profile')
kmf2 = plt.gcf()
###Output
_____no_output_____
###Markdown
Convert to a Plotly object.
###Code
pyplot(kmf2, ci=False)
###Output
_____no_output_____
###Markdown
Testing for Difference It looks like DNA Type 2 is potentially more deadly, or more difficult to treat, compared to Type 1. However, the difference between these survival curves still does not seem dramatic. It will be useful to perform a statistical test on the different DNA profiles to see if their survival rates are significantly different. Python's *lifelines* contains methods in `lifelines.statistics`, and the R package `survival` uses the function `survdiff()`. Both functions return a p-value from a chi-squared distribution. It turns out these two DNA types do not have significantly different survival rates. Using R
###Code
%%R
survdiff(Surv(time, delta) ~ type)
###Output
_____no_output_____
###Markdown
Using Python
###Code
from lifelines.statistics import logrank_test
summary_ = logrank_test(T, T2, C, C2, alpha=99)
print(summary_)
###Output
<lifelines.StatisticalResult:
Results
df: 1
alpha: 99
t 0: -1
test: logrank
null distribution: chi squared
__ p-value ___|__ test statistic __|____ test result ____|__ is significant __
0.09487 | 2.790 | Cannot Reject Null | False
>
###Markdown
Estimating Hazard Rates Using R To estimate the hazard function, we compute the cumulative hazard function using the [Nelson-Aalen estimator](http://en.wikipedia.org/wiki/Nelson%E2%80%93Aalen_estimator), defined as:$$\hat{\Lambda} (t) = \sum_{t_i \leq t} \frac{d_i}{n_i}$$where $d_i$ is the number of deaths at time $t_i$ and $n_i$ is the number of susceptible individuals. Both R and Python modules use the same estimator. However, in R we will use the `-log` of the Fleming and Harrington estimator, which is equivalent to the Nelson-Aalen.
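###Markdown
As a quick illustrative calculation with made-up numbers (not taken from the `tongue` data): if one death occurs at $t_1$ among $n_1 = 10$ subjects at risk, and two deaths occur at $t_2$ among $n_2 = 9$ subjects at risk, then $\hat{\Lambda}(t_2) = \frac{1}{10} + \frac{2}{9} \approx 0.32$.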
###Code
%%R
haz <- Surv(time[type==1], delta[type==1])
haz.fit <- summary(survfit(haz ~ 1), type='fh')
x <- c(haz.fit$time, 250)
y <- c(-log(haz.fit$surv), 1.474)
cum.haz <- data.frame(time=x, cumulative.hazard=y)
p <- ggplot(cum.haz, aes(time, cumulative.hazard)) + geom_step() + theme_bw() +
ggtitle('Nelson-Aalen Estimate')
p
%R p <- plot.ly("https://plotly.com/~rmdk/185/cumulativehazard-vs-time/")
# pass object to python kernel
%R -o p
# Render HTML
HTML(p[0])
###Output
_____no_output_____
###Markdown
Using Python
###Code
from lifelines.estimation import NelsonAalenFitter
naf = NelsonAalenFitter()
naf.fit(T, event_observed=C)
naf.plot(title='Nelson-Aalen Estimate')
naf.plot(ci_force_lines=True, title='Nelson-Aalen Estimate')
py_p = plt.gcf()
pyplot(py_p, legend=False)
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install publisher --upgrade
import publisher
publisher.publish(
'survival_analysis.ipynb', 'ipython-notebooks/survival-analysis-r-vs-python/',
'Survival Analysis with Plotly: R vs Python',
'An introduction to survival analysis with Plotly graphs using R, Python, and IPython notebooks',
name='Survival Analysis with Plotly')
###Output
_____no_output_____ |
3-Natural Language Processing in TensorFlow/2/assignment/C3_W2_Assignment.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
**Note:** This notebook can run using TensorFlow 2.5.0
###Code
#!pip install tensorflow==2.5.0
import csv
import tensorflow as tf
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# bbc-text.csv
!gdown --id 1rX10xeI3eUJmOLsc4pOPY6AnCLO8DxNj
vocab_size = 1000 # YOUR CODE HERE
embedding_dim = 16 # YOUR CODE HERE
max_length = 120 # YOUR CODE HERE
trunc_type = 'post' # YOUR CODE HERE
padding_type = 'post' # YOUR CODE HERE
oov_tok = "<oov>"# YOUR CODE HERE
training_portion = .8
sentences = []
labels = []
stopwords = [ "a", "about", "above", "after", "again", "against", "all", "am", "an", "and", "any", "are", "as", "at", "be", "because", "been", "before", "being", "below", "between", "both", "but", "by", "could", "did", "do", "does", "doing", "down", "during", "each", "few", "for", "from", "further", "had", "has", "have", "having", "he", "he'd", "he'll", "he's", "her", "here", "here's", "hers", "herself", "him", "himself", "his", "how", "how's", "i", "i'd", "i'll", "i'm", "i've", "if", "in", "into", "is", "it", "it's", "its", "itself", "let's", "me", "more", "most", "my", "myself", "nor", "of", "on", "once", "only", "or", "other", "ought", "our", "ours", "ourselves", "out", "over", "own", "same", "she", "she'd", "she'll", "she's", "should", "so", "some", "such", "than", "that", "that's", "the", "their", "theirs", "them", "themselves", "then", "there", "there's", "these", "they", "they'd", "they'll", "they're", "they've", "this", "those", "through", "to", "too", "under", "until", "up", "very", "was", "we", "we'd", "we'll", "we're", "we've", "were", "what", "what's", "when", "when's", "where", "where's", "which", "while", "who", "who's", "whom", "why", "why's", "with", "would", "you", "you'd", "you'll", "you're", "you've", "your", "yours", "yourself", "yourselves" ]
print(len(stopwords))
# Expected Output
# 153
with open("./bbc-text.csv", 'r') as csvfile:
### START CODE HERE
reader = csv.reader(csvfile, delimiter=',')
next(reader)
for row in reader:
labels.append(row[0])
sentence = row[1]
for s in stopwords:
token = " "+s+" "
sentence = sentence.replace(token, ' ')
# sentence = " ".join([s for s in sentence.split(' ') if s not in stopwords])
sentences.append(sentence)
### END CODE HERE
print(len(labels))
print(len(sentences))
print(sentences[0])
# Expected Output
# 2225
# 2225
# tv future hands viewers home theatre systems plasma high-definition tvs digital video recorders moving living room way people watch tv will radically different five years time. according expert panel gathered annual consumer electronics show las vegas discuss new technologies will impact one favourite pastimes. us leading trend programmes content will delivered viewers via home networks cable satellite telecoms companies broadband service providers front rooms portable devices. one talked-about technologies ces digital personal video recorders (dvr pvr). set-top boxes like us s tivo uk s sky+ system allow people record store play pause forward wind tv programmes want. essentially technology allows much personalised tv. also built-in high-definition tv sets big business japan us slower take off europe lack high-definition programming. not can people forward wind adverts can also forget abiding network channel schedules putting together a-la-carte entertainment. us networks cable satellite companies worried means terms advertising revenues well brand identity viewer loyalty channels. although us leads technology moment also concern raised europe particularly growing uptake services like sky+. happens today will see nine months years time uk adam hume bbc broadcast s futurologist told bbc news website. likes bbc no issues lost advertising revenue yet. pressing issue moment commercial uk broadcasters brand loyalty important everyone. will talking content brands rather network brands said tim hanlon brand communications firm starcom mediavest. reality broadband connections anybody can producer content. added: challenge now hard promote programme much choice. means said stacey jolna senior vice president tv guide tv group way people find content want watch simplified tv viewers. means networks us terms channels take leaf google s book search engine future instead scheduler help people find want watch. kind channel model might work younger ipod generation used taking control gadgets play them. might not suit everyone panel recognised. older generations comfortable familiar schedules channel brands know getting. perhaps not want much choice put hands mr hanlon suggested. end kids just diapers pushing buttons already - everything possible available said mr hanlon. ultimately consumer will tell market want. 50 000 new gadgets technologies showcased ces many enhancing tv-watching experience. high-definition tv sets everywhere many new models lcd (liquid crystal display) tvs launched dvr capability built instead external boxes. one example launched show humax s 26-inch lcd tv 80-hour tivo dvr dvd recorder. one us s biggest satellite tv companies directtv even launched branded dvr show 100-hours recording capability instant replay search function. set can pause rewind tv 90 hours. microsoft chief bill gates announced pre-show keynote speech partnership tivo called tivotogo means people can play recorded programmes windows pcs mobile devices. reflect increasing trend freeing multimedia people can watch want want.
len(sentences) * training_portion
train_size = int(len(sentences) * training_portion) # YOUR CODE HERE
train_sentences = sentences[0:train_size] # YOUR CODE HERE
train_labels = labels[0:train_size] # YOUR CODE HERE
validation_sentences = sentences[train_size:] # YOUR CODE HERE
validation_labels = labels[train_size:]# YOUR CODE HERE
print(train_size)
print(len(train_sentences))
print(len(train_labels))
print(len(validation_sentences))
print(len(validation_labels))
# Expected output (if training_portion=.8)
# 1780
# 1780
# 1780
# 445
# 445
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok) # YOUR CODE HERE
tokenizer.fit_on_texts(train_sentences) # YOUR CODE HERE)
word_index = tokenizer.word_index # YOUR CODE HERE
train_sequences = tokenizer.texts_to_sequences(train_sentences) # YOUR CODE HERE
train_padded = pad_sequences(train_sequences, maxlen=max_length, truncating=trunc_type, padding=padding_type) # YOUR CODE HERE
print(len(train_sequences[0]))
print(len(train_padded[0]))
print(len(train_sequences[1]))
print(len(train_padded[1]))
print(len(train_sequences[10]))
print(len(train_padded[10]))
# Expected Ouput
# 449
# 120
# 200
# 120
# 192
# 120
validation_sequences = tokenizer.texts_to_sequences(validation_sentences) # YOUR CODE HERE
validation_padded = pad_sequences(validation_sequences, maxlen=max_length, truncating=trunc_type, padding=padding_type) # YOUR CODE HERE
print(len(validation_sequences))
print(validation_padded.shape)
# Expected output
# 445
# (445, 120)
label_tokenizer = Tokenizer() # YOUR CODE HERE
label_tokenizer.fit_on_texts(train_labels) # YOUR CODE HERE)
training_label_seq = np.array(label_tokenizer.texts_to_sequences(train_labels)) # YOUR CODE HERE
validation_label_seq = np.array(label_tokenizer.texts_to_sequences(validation_labels)) # YOUR CODE HERE
print(training_label_seq[0])
print(training_label_seq[1])
print(training_label_seq[2])
print(training_label_seq.shape)
print(validation_label_seq[0])
print(validation_label_seq[1])
print(validation_label_seq[2])
print(validation_label_seq.shape)
# Expected output
# [4]
# [2]
# [1]
# (1780, 1)
# [5]
# [4]
# [3]
# (445, 1)
model = tf.keras.Sequential([
# YOUR CODE HERE
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(24, activation='relu'),
tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
# Expected Output
# Layer (type) Output Shape Param #
# =================================================================
# embedding (Embedding) (None, 120, 16) 16000
# _________________________________________________________________
# global_average_pooling1d (Gl (None, 16) 0
# _________________________________________________________________
# dense (Dense) (None, 24) 408
# _________________________________________________________________
# dense_1 (Dense) (None, 6) 150
# =================================================================
# Total params: 16,558
# Trainable params: 16,558
# Non-trainable params: 0
num_epochs = 30
history = model.fit(train_padded, training_label_seq,
verbose=2,
epochs=num_epochs,
validation_data=(validation_padded, validation_label_seq)) # YOUR CODE HERE)
import matplotlib.pyplot as plt
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_sentence(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape) # shape: (vocab_size, embedding_dim)
# Expected output
# (1000, 16)
import io
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, vocab_size):
word = reverse_word_index[word_num]
embeddings = weights[word_num]
out_m.write(word + "\n")
out_v.write('\t'.join([str(x) for x in embeddings]) + "\n")
out_v.close()
out_m.close()
try:
from google.colab import files
except ImportError:
pass
else:
files.download('vecs.tsv')
files.download('meta.tsv')
###Output
_____no_output_____ |
5 - RegressaoLinearMultipla.ipynb | ###Markdown
Lecture on Multiple Linear Regression. Prof. Adriano Santos
###Code
# Importing dependencies
import numpy as np
import pandas as pd
import matplotlib.pyplot as mpl
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
%matplotlib inline
# Importing the dataset
df = pd.read_csv('dados/multiplelinearregretion.csv', delimiter=',')
df.head()
# Renaming the 'R&D Spend' and 'Marketing Spend' columns
df.rename(columns={'R&D Spend':'RDSpende', 'Marketing Spend': 'MarketingSpend'}, inplace=True)
df.head()
# Correlation analysis
df.corr()
# Preparing the data
X = df.iloc[:, :-1].values # All columns except the target, Profit
y = df.iloc[:, -1].values
###Output
_____no_output_____
###Markdown
Preprocessing step (feature engineering)
###Code
labelEnconding = LabelEncoder()
X[:, -1] = labelEnconding.fit_transform(X[:, -1])
X
# Dummy coding
ohe = OneHotEncoder(categorical_features=[3]) # column 3 holds the encoded categorical feature
X = ohe.fit_transform(X).toarray() # Creating the dummy variables
X
# ---- Dropping the first dummy column to avoid the dummy variable trap
X = X[:, 1:]
###Output
_____no_output_____
###Markdown
Preparing the data for training and testing
###Code
# Selecting samples for training and testing
X_treino, X_teste, y_treino, y_teste = train_test_split(X, y, test_size=0.3)
###Output
_____no_output_____
###Markdown
Creating the Multiple Linear Regression model
###Code
# Creating the Linear Regression model
regressor = LinearRegression()
regressor.fit(X_treino, y_treino)
###Output
_____no_output_____
###Markdown
Model Evaluation
###Code
regressor.score(X_teste, y_teste)
###Output
_____no_output_____
###Markdown
Model Optimization* Retrain the model, selecting whichever features you wish. It is important that you eliminate one feature at a time and evaluate the result.
###Code
# Examples...
X_novo = X[:, 2:4]
X_treino, X_teste, y_treino, y_teste = train_test_split(X_novo, y, test_size=0.3)
regressor = LinearRegression()
regressor.fit(X_treino, y_treino)
regressor.score(X_teste, y_teste)
###Output
_____no_output_____
###Markdown
Model Optimization
###Code
# Applying recursive feature elimination (RFE)
from sklearn.feature_selection import RFE
# Instantiating the selector
seletor = RFE(regressor, 2, step=1)
# Training
resultado = seletor.fit(X_treino, y_treino)
# Getting the feature ranking
resultado.ranking_
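# Illustrative follow-up (not part of the original lecture): `support_` is a
# boolean mask of the features RFE selected.
print(resultado.support_)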
###Output
_____no_output_____ |
Class/ML0101EN-Clas-Decision-Trees-drug-py-v1.ipynb | ###Markdown
Decision Trees In this lab exercise, you will learn a popular machine learning algorithm, Decision Tree. You will use this classification algorithm to build a model from historical data of patients and their responses to different medications. Then you will use the trained decision tree to predict the class of an unknown patient, or to find a proper drug for a new patient. Table of contents About the dataset Downloading the Data Pre-processing Setting up the Decision Tree Modeling Prediction Evaluation Visualization Import the Following Libraries: numpy (as np) pandas DecisionTreeClassifier from sklearn.tree
###Code
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
###Output
_____no_output_____
###Markdown
About the dataset Imagine that you are a medical researcher compiling data for a study. You have collected data about a set of patients, all of whom suffered from the same illness. During their course of treatment, each patient responded to one of 5 medications: Drug A, Drug B, Drug C, Drug X and Drug Y. Part of your job is to build a model to find out which drug might be appropriate for a future patient with the same illness. The features of this dataset are the Age, Sex, Blood Pressure, and Cholesterol of the patients, and the target is the drug that each patient responded to. It is a sample of a multiclass classifier, and you can use the training part of the dataset to build a decision tree, and then use it to predict the class of an unknown patient, or to prescribe a drug to a new patient. Downloading the Data To download the data, we will use !wget to download it from IBM Object Storage.
###Code
!wget -O drug200.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/drug200.csv
###Output
_____no_output_____
###Markdown
__Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 TB of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC). Now, read the data using a pandas dataframe:
###Code
my_data = pd.read_csv("drug200.csv", delimiter=",")
my_data[0:5]
###Output
_____no_output_____
###Markdown
Practice What is the size of the data?
###Code
# write your code here
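# One possible solution (illustrative):
my_data.shape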
###Output
_____no_output_____
###Markdown
Pre-processing Using my_data as the Drug.csv data read by pandas, declare the following variables: X as the Feature Matrix (data of my_data) y as the response vector (target) Remove the column containing the target name since it doesn't contain numeric values.
###Code
X = my_data[['Age', 'Sex', 'BP', 'Cholesterol', 'Na_to_K']].values
X[0:5]
###Output
_____no_output_____
###Markdown
As you may figure out, some features in this dataset are categorical, such as __Sex__ or __BP__. Unfortunately, Sklearn Decision Trees do not handle categorical variables. But we can still convert these features to numerical values. __pandas.get_dummies()__ converts categorical variables into dummy/indicator variables.
###Code
from sklearn import preprocessing
le_sex = preprocessing.LabelEncoder()
le_sex.fit(['F','M'])
X[:,1] = le_sex.transform(X[:,1])
le_BP = preprocessing.LabelEncoder()
le_BP.fit([ 'LOW', 'NORMAL', 'HIGH'])
X[:,2] = le_BP.transform(X[:,2])
le_Chol = preprocessing.LabelEncoder()
le_Chol.fit([ 'NORMAL', 'HIGH'])
X[:,3] = le_Chol.transform(X[:,3])
X[0:5]
###Output
_____no_output_____
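###Markdown
For comparison, here is a minimal sketch of the `pandas.get_dummies()` approach mentioned above (illustrative only; the rest of this lab keeps the `LabelEncoder` encoding):
###Code
# One-hot encode the categorical columns directly with pandas
pd.get_dummies(my_data[['Sex', 'BP', 'Cholesterol']])[0:5]
###Output
_____no_output_____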
###Markdown
Now we can fill the target variable.
###Code
y = my_data["Drug"]
y[0:5]
###Output
_____no_output_____
###Markdown
Setting up the Decision Tree We will be using train/test split on our decision tree. Let's import train_test_split from sklearn.model_selection.
###Code
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Now train_test_split will return 4 different parameters. We will name them:X_trainset, X_testset, y_trainset, y_testset The train_test_split will need the parameters: X, y, test_size=0.3, and random_state=3. The X and y are the arrays required before the split, the test_size represents the ratio of the testing dataset, and the random_state ensures that we obtain the same splits.
###Code
X_trainset, X_testset, y_trainset, y_testset = train_test_split(X, y, test_size=0.3, random_state=3)
###Output
_____no_output_____
###Markdown
Practice Print the shape of X_trainset and y_trainset. Ensure that the dimensions match.
###Code
# your code
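# One possible solution (illustrative):
print(X_trainset.shape)
print(y_trainset.shape)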
###Output
_____no_output_____
###Markdown
Print the shape of X_testset and y_testset. Ensure that the dimensions match
###Code
# your code
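# One possible solution (illustrative):
print(X_testset.shape)
print(y_testset.shape)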
###Output
_____no_output_____
###Markdown
Modeling We will first create an instance of the DecisionTreeClassifier called drugTree. Inside of the classifier, specify criterion="entropy" so we can see the information gain of each node.
###Code
drugTree = DecisionTreeClassifier(criterion="entropy", max_depth = 4)
drugTree # it shows the default parameters
###Output
_____no_output_____
###Markdown
Next, we will fit the data with the training feature matrix X_trainset and training response vector y_trainset
###Code
drugTree.fit(X_trainset,y_trainset)
###Output
_____no_output_____
###Markdown
Prediction Let's make some predictions on the testing dataset and store it into a variable called predTree.
###Code
predTree = drugTree.predict(X_testset)
###Output
_____no_output_____
###Markdown
You can print out predTree and y_testset if you want to visually compare the prediction to the actual values.
###Code
print(predTree[0:5])
print(y_testset[0:5])
###Output
_____no_output_____
###Markdown
Evaluation Next, let's import metrics from sklearn and check the accuracy of our model.
###Code
from sklearn import metrics
import matplotlib.pyplot as plt
print("DecisionTrees's Accuracy: ", metrics.accuracy_score(y_testset, predTree))
###Output
_____no_output_____
###Markdown
__Accuracy classification score__ computes subset accuracy: the set of labels predicted for a sample must exactly match the corresponding set of labels in y_true. In multilabel classification, the function returns the subset accuracy. If the entire set of predicted labels for a sample strictly match with the true set of labels, then the subset accuracy is 1.0; otherwise it is 0.0. Practice Can you calculate the accuracy score without sklearn ?
###Code
# your code here
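# One possible solution (illustrative): accuracy is the mean of exact
# matches between the predicted and true labels.
np.mean(predTree == y_testset.values)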
###Output
_____no_output_____
###Markdown
Visualization Let's visualize the tree.
###Code
# Notice: You might need to uncomment and install the pydotplus and graphviz libraries if you have not installed these before
# !conda install -c conda-forge pydotplus -y
# !conda install -c conda-forge python-graphviz -y
from sklearn.externals.six import StringIO
import pydotplus
import matplotlib.image as mpimg
from sklearn import tree
%matplotlib inline
dot_data = StringIO()
filename = "drugtree.png"
featureNames = my_data.columns[0:5]
targetNames = my_data["Drug"].unique().tolist()
out=tree.export_graphviz(drugTree,feature_names=featureNames, out_file=dot_data, class_names= np.unique(y_trainset), filled=True, special_characters=True,rotate=False)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png(filename)
img = mpimg.imread(filename)
plt.figure(figsize=(100, 200))
plt.imshow(img,interpolation='nearest')
###Output
_____no_output_____ |
Ex 1- Hello World.ipynb | ###Markdown
Pycon Africa - Developing Maltego Transforms Using Python By Tendai Marengereke - @marengz  Maltego is a tool for interactive link analysis and data gathering. Like any large and powerful software package, it can be overwhelming at first. This workshop starts with the basics of Maltego and introduces the attendee to the tool and its transforms for network and OSINT analysis. We also explore the use of Maltego when investigating breached passwords. The main aim of this workshop is to provide an introduction to Maltego and the process of coding Maltego transforms using Python. Create a Virtual Environment I use virtualenvwrapper-win on Windows or virtualenvwrapper, but feel free to use what you prefer. Run the following command in cmd with your virtualenv activated: mkvirtualenv pyconafrica Step 1 : Create a project folder First install the maltego-trx library by running the following command: pip install maltego-trx After the maltego-trx library has been installed, you can use the following command to create a new project folder with the recommended layout: maltego-trx start pyconafrica The command above creates a directory named "pyconafrica" which contains the "project.py" file used to run your transforms, and the "transforms" directory that contains your transform code. This produces a directory structure as shown below. Two example transforms are added automatically.  Step 2 : Default Example Transforms *pyconafrica/transforms/GreetPerson.py*
###Code
from maltego_trx.entities import Phrase
from maltego_trx.transform import DiscoverableTransform
class GreetPerson(DiscoverableTransform):
"""
Returns a phrase greeting a person on the graph.
"""
@classmethod
def create_entities(cls, request, response):
person_name = request.Value
response.addEntity(Phrase, "Hi %s, nice to meet you!" % person_name)
###Output
_____no_output_____
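###Markdown
The starter project also generates a second example transform, DNSToIP. Below is a sketch of what it looks like (based on the default maltego-trx template; the generated code may differ slightly between library versions):
###Code
import socket

from maltego_trx.entities import IPAddress
from maltego_trx.transform import DiscoverableTransform

class DNSToIP(DiscoverableTransform):
    """
    Receive a DNS name from the client, and resolve it to an IP address.
    """

    @classmethod
    def create_entities(cls, request, response):
        dns_name = request.Value
        try:
            ip_address = socket.gethostbyname(dns_name)
            response.addEntity(IPAddress, ip_address)
        except socket.error as e:
            response.addUIMessage("Error: " + str(e))
###Output
_____no_output_____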
###Markdown
These transforms will not print anything to the console on their own, but we can check that they have been discovered by the library. Inside the *pyconafrica/* directory, run the following in cmd: python project.py list The above command shows the transforms which have been discovered by the library and are ready to use in the Maltego Client. It should output the following:
###Code
C:\pyconafrica\
λ python project.py list
= Transform Server URLs =
/run/dnstoip/: DNSToIP
/run/greetperson/: GreetPerson
= Local Transform Names =
dnstoip: DNSToIP
greetperson: GreetPerson
###Output
_____no_output_____
###Markdown
Running The Transform Server You can start the development server by running the following command: python project.py runserver This will start up a development server that automatically reloads every time the code changes. Adding the transform to Maltego  This will open a new wizard that guides you through the process of adding a new local transform. The first page allows you to describe the new transform. The most important details are the "Transform ID" (which must be unique) and the Input Entity Type.  The next page allows you to specify the settings used to execute the transform. 
###Markdown
The "command" field should be the absolute path of the interpreter that will execute your transform. In this case I will use the path for my Python installation, which is C:\Users\<username>\Envs\<env_name>\Scripts\python.exe. The "parameters" field will need to include "project.py" to tell Python that we want to execute our project file. We will also need to include the parameter "local" to tell our project it's being run locally, as well as the transform name to run. If you are unsure what transform name to use, you can use the following command:
###Markdown
python project.py list
###Code
C:\pyconafrica\
λ python project.py list
= Transform Server URLs =
/run/dnstoip/: DNSToIP
/run/greetperson/: GreetPerson
= Local Transform Names =
dnstoip: DNSToIP
greetperson: GreetPerson
###Output
_____no_output_____ |
material/PY0101EN-4-2-WriteFile.ipynb | ###Markdown
Write and Save Files in Python Estimated time needed: **25** minutes Objectives After completing this lab you will be able to: - Write to files using Python libraries Table of Contents Writing Files Appending Files Additional File modes Copy a File Writing Files We can open a file object and use its write() method to save text to a file. To write, the mode argument must be set to write, w. Let's write a file Example2.txt with the line: "This is line A"
###Code
# Write line to file
exmp2 = '/resources/data/Example2.txt'
with open(exmp2, 'w') as writefile:
writefile.write("This is line Sérgio")
###Output
_____no_output_____
###Markdown
We can read the file to see if it worked:
###Code
# Read file
with open(exmp2, 'r') as testwritefile:
print(testwritefile.read())
###Output
This is line Sérgio
###Markdown
We can write multiple lines:
###Code
# Write lines to file
with open(exmp2, 'w') as writefile:
writefile.write("This is line A\n")
writefile.write("This is line B\n")
###Output
_____no_output_____
###Markdown
The method .write() works similarly to the method .readline(), except that instead of reading a new line it writes a new line; each call adds one more line to the end of the file. You can check the file to see if your results are correct.
###Code
# Check whether write to file
with open(exmp2, 'r') as testwritefile:
print(testwritefile.read())
###Output
This is line A
This is line B
###Markdown
We write a list to a .txt file as follows:
###Code
# Sample list of text
Lines = ["This is line A\n", "This is line B\n", "This is line C\n"]
Lines
# Write the strings in the list to text file
with open('Example2.txt', 'w') as writefile:
for line in Lines:
print(line)
writefile.write(line)
###Output
This is line A
This is line B
This is line C
###Markdown
We can verify the file is written by reading it and printing out the values:
###Code
# Verify if writing to file is successfully executed
with open('Example2.txt', 'r') as testwritefile:
print(testwritefile.read())
###Output
This is line A
This is line B
This is line C
###Markdown
However, note that setting the mode to **w** overwrites all the existing data in the file.
###Code
with open('Example2.txt', 'w') as writefile:
writefile.write("Overwrite\n")
with open('Example2.txt', 'r') as testwritefile:
print(testwritefile.read())
###Output
Overwrite
###Markdown
Appending Files We can write to files without losing any of the existing data by setting the mode argument to append, **a**. You can append a new line as follows:
###Code
# Write a new line to text file
with open('Example2.txt', 'a') as testwritefile:
testwritefile.write("This is line C\n")
testwritefile.write("This is line D\n")
testwritefile.write("This is line E\n")
###Output
_____no_output_____
###Markdown
You can verify the file has changed by running the following cell:
###Code
# Verify if the new line is in the text file
with open('Example2.txt', 'r') as testwritefile:
print(testwritefile.read())
###Output
Overwrite
This is line C
This is line D
This is line E
This is line C
This is line D
This is line E
###Markdown
Additional modes It's fairly inefficient to open the file in **a** or **w** and then reopen it in **r** to read any lines. Luckily we can access the file in the following modes:- **r+** : Reading and writing. Cannot truncate the file.- **w+** : Writing and reading. Truncates the file.- **a+** : Appending and reading. Creates a new file, if none exists. You don't have to dwell on the specifics of each mode for this lab. Let's try out the **a+** mode:
###Code
with open('Example2.txt', 'a+') as testwritefile:
testwritefile.write("This is line E\n")
print(testwritefile.read())
###Output
###Markdown
There were no errors, but read() also did not output anything. This is because of our location in the file. Most of the file methods we've looked at work from a certain location in the file: .write() writes at a certain location in the file, .read() reads at a certain location in the file, and so on. You can think of this as moving your pointer around in the notepad to make changes at a specific location. Opening the file in **w** is akin to opening the .txt file, moving your cursor to the beginning of the text file, writing new text, and deleting everything that follows. Whereas opening the file in **a** is similar to opening the .txt file, moving your cursor to the very end and then adding the new pieces of text. It is often very useful to know where the 'cursor' is in a file and be able to control it. The following methods allow us to do precisely this -- .tell() - returns the current position in bytes- .seek(offset,from) - changes the position by 'offset' bytes with respect to 'from'. From can take the value of 0,1,2 corresponding to the beginning, relative to the current position, and the end. Now let's revisit **a+**
###Code
with open('Example2.txt', 'a+') as testwritefile:
print("Initial Location: {}".format(testwritefile.tell()))
data = testwritefile.read()
if (not data): #empty strings return false in python
print('Read nothing')
else:
print(testwritefile.read())
testwritefile.seek(0,0) # move 0 bytes from beginning.
print("\nNew Location : {}".format(testwritefile.tell()))
data = testwritefile.read()
if (not data):
print('Read nothing')
else:
print(data)
print("Location after read: {}".format(testwritefile.tell()) )
###Output
Initial Location: 115
Read nothing
New Location : 0
Overwrite
This is line C
This is line D
This is line E
This is line C
This is line D
This is line E
This is line E
Location after read: 115
###Markdown
Finally, a note on the difference between **w+** and **r+**. Both of these modes allow access to the read and write methods; however, opening a file in **w+** overwrites it and deletes all existing data. To work with existing data in a file, use **r+** or **a+**. While using **r+**, it can be useful to call the .truncate() method at the end of your data. This will reduce the file to your data and delete everything that follows. In the following code block, run the code as it is first, and then run it again with .truncate() uncommented.
###Code
with open('Example2.txt', 'r+') as testwritefile:
data = testwritefile.readlines()
testwritefile.seek(0,0) #write at beginning of file
testwritefile.write("Line 1" + "\n")
testwritefile.write("Line 2" + "\n")
testwritefile.write("Line 3" + "\n")
testwritefile.write("finished\n")
#Uncomment the line below
#testwritefile.truncate()
testwritefile.seek(0,0)
print(testwritefile.read())
###Output
Line 1
Line 2
Line 3
finished
is line D
This is line E
This is line C
This is line D
This is line E
This is line E
###Markdown
Copy a File Let's copy the file Example2.txt to the file Example3.txt:
###Code
# Copy file to another
with open('Example2.txt','r') as readfile:
with open('Example3.txt','w') as writefile:
for line in readfile:
writefile.write(line)
###Output
_____no_output_____
###Markdown
We can read the file to see if everything works:
###Code
# Verify if the copy is successfully executed
with open('Example3.txt','r') as testwritefile:
print(testwritefile.read())
###Output
Line 1
Line 2
Line 3
finished
is line D
This is line E
This is line C
This is line D
This is line E
This is line E
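###Markdown
Before moving on, here is a minimal sketch of how the same `with open(...)` pattern extends to another text-based format, using the standard-library `csv` module (illustrative only; `Example4.csv` is not part of the original lab):
###Code
import csv

# newline='' is the recommended setting when writing CSV files
with open('Example4.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['file', 'description'])
    writer.writerow(['Example2.txt', 'text file used in this lab'])
###Output
_____no_output_____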
###Markdown
After reading files, we can also write data into files and save them in different file formats like **.txt, .csv, .xls (for excel files) etc**. You will come across these in further examples. Now go to the directory to ensure the .txt file exists and contains the summary data that we wrote. Exercise Your local university's Raptors fan club maintains a register of its active members on a .txt document. Every month they update the file by removing the members who are not active. You have been tasked with automating this with your Python skills. Given the file currentMem, remove each member with a 'no' in their Active column. Keep track of each of the removed members and append them to the exMem file. Make sure the format of the original files is preserved. (_Hint: Do this by reading/writing whole lines and ensuring the header remains._) Run the code block below prior to starting the exercise. The skeleton code has been provided for you; edit only the cleanFiles function.
###Code
#Run this prior to starting the exercise
from random import randint as rnd
memReg = 'members.txt'
exReg = 'inactive.txt'
fee =('yes','no')
def genFiles(current,old):
with open(current,'w+') as writefile:
writefile.write('Membership No Date Joined Active \n')
data = "{:^13} {:<11} {:<6}\n"
for rowno in range(20):
date = str(rnd(2015,2020))+ '-' + str(rnd(1,12))+'-'+str(rnd(1,25))
writefile.write(data.format(rnd(10000,99999),date,fee[rnd(0,1)]))
with open(old,'w+') as writefile:
writefile.write('Membership No Date Joined Active \n')
data = "{:^13} {:<11} {:<6}\n"
for rowno in range(3):
date = str(rnd(2015,2020))+ '-' + str(rnd(1,12))+'-'+str(rnd(1,25))
writefile.write(data.format(rnd(10000,99999),date,fee[1]))
genFiles(memReg,exReg)
###Output
_____no_output_____
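###Markdown
A quick aside on the format spec used in genFiles: in "{:^13} {:<11} {:<6}", ^ centers a value within a 13-character field and < left-aligns within the given width. For example:
###Code
print("{:^13}|{:<11}|{:<6}".format(12345, '2020-1-1', 'yes'))
# -> '    12345    |2020-1-1   |yes   '
###Output
_____no_output_____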
###Markdown
Start your solution below:
###Code
def cleanFiles(currentMem,exMem):
'''
currentMem: File containing list of current members
exMem: File containing list of old members
Removes all rows from currentMem containing 'no' and appends them to exMem
'''
pass
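    # One possible approach (a sketch, not the only valid solution):
    # with open(currentMem, 'r+') as writeFile, open(exMem, 'a+') as appendFile:
    #     lines = writeFile.readlines()
    #     header, rows = lines[0], lines[1:]
    #     writeFile.seek(0)
    #     writeFile.write(header)
    #     for row in rows:
    #         if 'no' in row:
    #             appendFile.write(row)   # move the inactive member out
    #         else:
    #             writeFile.write(row)    # keep the active member
    #     writeFile.truncate()            # drop leftover bytes past the new end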
# Code to help you see the files
# Leave as is
memReg = 'members.txt'
exReg = 'inactive.txt'
cleanFiles(memReg,exReg)
headers = "Membership No Date Joined Active \n"
with open(memReg,'r') as readFile:
print("Active Members: \n\n")
print(readFile.read())
with open(exReg,'r') as readFile:
print("Inactive Members: \n\n")
print(readFile.read())
###Output
Active Members:
Membership No Date Joined Active
72585 2016-11-18 no
97472 2016-1-9 yes
30373 2020-8-12 yes
81685 2018-1-11 no
75404 2020-2-4 yes
19360 2016-6-17 no
12805 2018-4-15 yes
69289 2015-5-6 yes
93210 2020-11-25 yes
10601 2020-10-7 no
50186 2016-10-11 yes
92110 2017-7-23 no
19464 2019-12-5 no
14639 2015-10-15 no
44946 2019-11-25 no
50585 2019-12-2 yes
36166 2015-5-3 yes
39823 2015-6-23 no
73596 2015-10-4 no
50619 2019-1-7 no
Inactive Members:
Membership No Date Joined Active
33459 2017-8-22 no
81638 2019-8-15 no
91634 2015-11-12 no
###Markdown
Run the following to verify your code:
###Code
def testMsg(passed):
if passed:
return 'Test Passed'
else :
return 'Test Failed'
testWrite = "testWrite.txt"
testAppend = "testAppend.txt"
passed = True
genFiles(testWrite,testAppend)
with open(testWrite,'r') as file:
ogWrite = file.readlines()
with open(testAppend,'r') as file:
ogAppend = file.readlines()
try:
cleanFiles(testWrite,testAppend)
except:
print('Error')
with open(testWrite,'r') as file:
clWrite = file.readlines()
with open(testAppend,'r') as file:
clAppend = file.readlines()
# checking if total no of rows is same, including headers
if (len(ogWrite) + len(ogAppend) != len(clWrite) + len(clAppend)):
print("The number of rows do not add up. Make sure your final files have the same header and format.")
passed = False
for line in clWrite:
if 'no' in line:
passed = False
print("Inactive members in file")
break
else:
if line not in ogWrite:
print("Data in file does not match original file")
passed = False
print ("{}".format(testMsg(passed)))
###Output
_____no_output_____ |
Yandex data science/2/Week 2/.ipynb_checkpoints/sklearn.linear_model_part1-checkpoint.ipynb | ###Markdown
Sklearn sklearn.linear_model **linear_model:*** RidgeClassifier* SGDClassifier* SGDRegressor* LinearRegression* LogisticRegression* Lasso* etc. Documentation: http://scikit-learn.org/stable/modules/classes.html#module-sklearn.linear_model Examples: http://scikit-learn.org/stable/modules/linear_model.html#linear-model
###Code
from matplotlib.colors import ListedColormap
from sklearn import datasets, linear_model, metrics, model_selection
import numpy as np
%pylab inline
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Data generation
###Code
blobs = datasets.make_blobs(centers = 2, cluster_std = 5.5, random_state=1)
colors = ListedColormap(['red', 'blue'])
pylab.figure(figsize(8, 8))
pylab.scatter([x[0] for x in blobs[0]], [x[1] for x in blobs[0]], c=blobs[1], cmap=colors)
train_data, test_data, train_labels, test_labels = model_selection.train_test_split(blobs[0], blobs[1],
test_size = 0.3,
random_state = 1)
###Output
_____no_output_____
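###Markdown
Any of the linear models listed at the top of this notebook can be plugged in here with the same fit/predict interface; a minimal sketch with SGDClassifier (illustration only, not part of the original comparison):
###Code
sgd_classifier = linear_model.SGDClassifier(random_state = 1)
sgd_classifier.fit(train_data, train_labels)
sgd_predictions = sgd_classifier.predict(test_data)
metrics.accuracy_score(test_labels, sgd_predictions)
###Output
_____no_output_____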
###Markdown
Linear classification RidgeClassifier
###Code
# create the classifier object
ridge_classifier = linear_model.RidgeClassifier(random_state = 1)
# train the classifier
ridge_classifier.fit(train_data, train_labels)
# apply the trained classifier
ridge_predictions = ridge_classifier.predict(test_data)
print(test_labels)
print(ridge_predictions)
# evaluate the classification quality
metrics.accuracy_score(test_labels, ridge_predictions)
ridge_classifier.coef_
ridge_classifier.intercept_
###Output
_____no_output_____
###Markdown
LogisticRegression
###Code
log_regressor = linear_model.LogisticRegression(random_state = 1)
log_regressor.fit(train_data, train_labels)
lr_predictions = log_regressor.predict(test_data)
lr_proba_predictions = log_regressor.predict_proba(test_data)
print(test_labels)
print(lr_predictions)
print(lr_proba_predictions)
print(metrics.accuracy_score(test_labels, lr_predictions))
print(metrics.accuracy_score(test_labels, ridge_predictions))
###Output
0.8666666666666667
###Markdown
Quality assessment via cross-validation cross_val_score
###Code
ridge_scoring = model_selection.cross_val_score(ridge_classifier, blobs[0], blobs[1], scoring = 'accuracy', cv = 10)
lr_scoring = model_selection.cross_val_score(log_regressor, blobs[0], blobs[1], scoring = 'accuracy', cv = 10)
lr_scoring
ridge_scoring
print('Ridge mean:{}, max:{}, min:{}, std:{}'.format(ridge_scoring.mean(), ridge_scoring.max(),
ridge_scoring.min(), ridge_scoring.std()))
print('Log mean:{}, max:{}, min:{}, std:{}'.format(lr_scoring.mean(), lr_scoring.max(),
lr_scoring.min(), lr_scoring.std()))
###Output
Log mean:0.8700000000000001, max:1.0, min:0.7, std:0.10049875621120892
###Markdown
cross_val_score with a given scorer and cv_strategy
###Code
scorer = metrics.make_scorer(metrics.accuracy_score)
cv_strategy = model_selection.StratifiedShuffleSplit(n_splits = 20 , test_size = 0.3, random_state = 2)
cv_strategy.get_n_splits(blobs[1], scorer)
ridge_scoring = model_selection.cross_val_score(ridge_classifier, blobs[0], blobs[1], scoring = scorer, cv = cv_strategy)
lr_scoring = model_selection.cross_val_score(log_regressor, blobs[0], blobs[1], scoring = scorer, cv = cv_strategy)
print('Ridge mean:{}, max:{}, min:{}, std:{}'.format(ridge_scoring.mean(), ridge_scoring.max(),
ridge_scoring.min(), ridge_scoring.std()))
print('Log mean:{}, max:{}, min:{}, std:{}'.format(lr_scoring.mean(), lr_scoring.max(),
lr_scoring.min(), lr_scoring.std()))
###Output
Log mean:0.8766666666666667, max:1.0, min:0.7666666666666667, std:0.06155395104206462
|
Data Analysis Projects/iris_using_logistic_regression.ipynb | ###Markdown
Training Data
###Code
# Imports added so this cell is self-contained; x_iris and y_iris are assumed
# to be the iris features/labels loaded as pandas objects in an earlier cell.
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report

for i in range(10):
    x_iris["species"] = y_iris["species"]
    x_iris = shuffle(x_iris)
x_train, x_test, y_train, y_test = train_test_split(x_iris.iloc[:, :-1], x_iris.loc[:, "species"],
test_size=0.2, random_state=42)
model = LogisticRegression()
model.fit(x_train, y_train)
predict = model.predict(x_test)
print(accuracy_score(y_test, predict))
print("\n", classification_report(y_test, predict))
###Output
1.0
0.9666666666666667
0.8333333333333334
0.9666666666666667
0.9666666666666667
0.8666666666666667
1.0
0.9666666666666667
1.0
0.9666666666666667
precision recall f1-score support
0 1.00 1.00 1.00 9
1 1.00 0.92 0.96 13
2 0.89 1.00 0.94 8
micro avg 0.97 0.97 0.97 30
macro avg 0.96 0.97 0.97 30
weighted avg 0.97 0.97 0.97 30
|
nb/imports.ipynb | ###Markdown
Imports
###Code
from bs4 import BeautifulSoup
from cmd import Cmd
columnize = Cmd().columnize
import glob
import importlib.resources
from io import StringIO
import os
from os import chdir, curdir, listdir
from pathlib import Path
import pkg_resources
from pprint import pprint as pp
import re
import requests
import site
import sys
import zipfile
from pandas import DataFrame, Series
site.addsitedir(Path.home() / 'Development/hw-4.2.0/hw')
[str(r) for r in pkg_resources.working_set.by_key['hw'].requires()]
sys.path
###Output
_____no_output_____
###Markdown
Get a List of Python Modules
###Code
try:
    src = Path("/usr/share/doc/python3.7-doc/html/py-modindex.html").read_text()
except:
    try:
        src = requests.get("https://docs.python.org/3/py-modindex.html").text
    except:
        pass  # fall through to the local directory listing below
class BS(BeautifulSoup):
def __init__(self, s):
super().__init__(s, features="html.parser")
PYTHON_MODULES = None
try:
PYTHON_MODULES = {a.text.split('.')[0] for a in BS(src).findAll("a") if len(a.text) > 1 and not a.text[0].isupper()}
except:
path = "usr/lib/python3.8"
PYTHON_MODULES = list(sorted([n for n in listdir(path) if not n.startswith(PERIOD)], key=str.lower))
PYTHON_MODULES
###Output
_____no_output_____
###Markdown
Examine the Contents of a `.zip` File
###Code
import zipfile
###Output
_____no_output_____
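###Markdown
A minimal sketch of inspecting an archive's members (the file name example.zip is hypothetical):
###Code
with zipfile.ZipFile('example.zip') as zf:
    pp(zf.namelist())            # names of every member in the archive
    info = zf.infolist()[0]      # ZipInfo metadata for the first member
    print(info.filename, info.file_size)
###Output
_____no_output_____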
###Markdown
Make a List of Python Distribution Modules
###Code
path = Series(filter(lambda p: p.exists(), # drop paths that don't exist
filter(lambda s: bool(len(s.name)), # drop the empty `str`s
map(lambda s: Path(s), # convert `str` to `Path`
sys.path
))))
path = Series([p for p in
[Path(s) for s in
[s for s in sys.path if s]
] if p.exists()
])
path
print(Path.cwd())
list(os.walk('.'))
def get_mod_names(f=curdir, output=False):
sio = StringIO()
for w in os.walk(f):
if w[0] == '.' or not Path(w[0]).stem[0] in {'.'}:
for s in w[2]:
if s.endswith('.py') and not (s.startswith('.')):
print((w[0].replace('/', '.').lstrip('.') + '.' + s).lstrip('.')[:-3], file=sio)
s = Series([t for t in sio.getvalue().split('\n') if t])
if output: print(s)
return s
get_mod_names(output=True)
def get_sys_path_mods(output=False):
mods = { 'python' : set(), 'dist' : set(), 'local' : set() }
path = Series([p for p in
[Path(s) for s in
[s for s in sys.path if s]
] if p.exists()
])
print(path)
for p in path:
n = get_mod_names(p)
get_sys_path_mods()
columnize(sorted(listdir(path[6])))
from pandas import DataFrame
DataFrame(os.walk('.'))
try:
src = requests.get("https://docs.python.org/3/py-modindex.html").text
except:
    src = Path("/usr/share/doc/python3.7-doc/html/py-modindex.html").read_text()
class BS(BeautifulSoup):
def __init__(self, s):
super().__init__(s, features="html.parser")
PYTHON_MODULES = None
try:
PYTHON_MODULES = {a.text.split('.')[0] for a in BS(src).findAll("a") if len(a.text) > 1 and not a.text[0].isupper()}
except:
path = site.USER_SITE
    PYTHON_MODULES = list(sorted([n for n in listdir(path) if not n.startswith('.')], key=str.lower))
PACKAGE_MODULES = sorted([re.sub(".py$", "", n) for n in listdir(site.USER_SITE) if not re.search(".*-info", n)])
help(os.walk)
def get_imports(path=Path.cwd(), recursive=True):
""" Return a `set` containing the names of the modules imported by `path`.
"""
if type(path) is str: path = Path(path)
FILES = glob.iglob(str(path / "*.py"), recursive=recursive)
LOCAL_MODULES = sorted([n for n in FILES if not n.startswith('.')], key=str.lower)
# print(f'{LOCAL_MODULES=}')
results = [set(), set(), set()]
if type(path) is str:
path = Path(path)
if path.is_dir():
for f in glob.iglob(str(path / "*.py")):
result = get_imports(f)
results = [r.union(result[i]) for i, r in enumerate(results)]
else:
result = set()
lines = path.read_text().split('\n')
        regex = re.compile(r"\s*import (\w*)|\s*from (\w*)")
for s in lines:
m = regex.match(s)
if m:
i = 1
while not m.group(i):
i += 1
assert i < 4
word = m.group(i)
if word:
m2 = re.search(r'(\w*)\.\w*', word)
if m2:
word = m2.group(1)
result.add(word)
for r in result:
if r in PYTHON_MODULES:
results[0].add(r)
elif r in LOCAL_MODULES:
results[2].add(r)
else:
# if not r == "py"
results[1].add(r)
results = [sorted(list(r)) for r in results]
return list(map(lambda s: list(s), results))
def print_imports(path=Path.cwd()):
results = get_imports(path)
if len(results[0]):
print("Python Modules:")
print()
columnize(results[0])
print()
if len(results[1]):
print("Packages:")
print()
columnize(results[1])
print()
if len(results[2]):
print("Local Modules:")
print()
columnize(results[2])
print()
print_imports('../hw')
!pwd
###Output
_____no_output_____
###Markdown
Imports
###Code
from cmd import Cmd
from glob import glob
import importlib.resources
from io import StringIO
import json
import os
from os import chdir, curdir, listdir
from pathlib import Path
import pkg_resources
from pprint import pprint as pp
import re
import requests
import site
import sys
import zipfile
from bs4 import BeautifulSoup
from pandas import DataFrame, Series
from walkdir import filtered_walk
site.addsitedir(Path.home() / 'Development/hw-4.2.0/hw')
help(filtered_walk)
from constants import *
from tools import *
# [str(r) for r in pkg_resources.working_set.by_key['hw'].requires()]
sys.path
###Output
_____no_output_____
###Markdown
Get a List of Python Modules
###Code
try:
src = Path("/usr/share/doc/python3.7-doc/html/py-modindex.html").read_text()
except:
try:
src = requests.get("https://docs.python.org/3/py-modindex.html").text
except:
pass
class BS(BeautifulSoup):
def __init__(self, s):
super().__init__(s, features="html.parser")
PYTHON_MODULES = None
try:
PYTHON_MODULES = {a.text.split('.')[0] for a in BS(src).findAll("a") if len(a.text) > 1 and not a.text[0].isupper()}
except:
path = "/usr/lib/python3.8"
PYTHON_MODULES = list(sorted([n for n in listdir(path) if not n.startswith(PERIOD)], key=str.lower))
DataFrame(list(filtered_walk(site.USER_SITE, included_files=['*.py'])))
PYTHON_MODULES
###Output
_____no_output_____
###Markdown
Examine the Contents of a `.zip` File
###Code
import zipfile
###Output
_____no_output_____
###Markdown
Make a List of Python Distribution Modules
###Code
path = Series(filter(lambda p: p.exists(), # drop paths that don't exist
filter(lambda s: bool(len(s.name)), # drop the empty `str`s
map(lambda s: Path(s), # convert `str` to `Path`
sys.path
))))
path = Series([p for p in
[Path(s) for s in
[s for s in sys.path if s]
] if p.exists()
])
path
print(Path.cwd())
list(os.walk('.'))
def get_mod_names(f=curdir, output=False):
sio = StringIO()
for w in os.walk(f):
if w[0] == '.' or not Path(w[0]).stem[0] in {'.'}:
for s in w[2]:
if s.endswith('.py') and not (s.startswith('.')):
print((w[0].replace('/', '.').lstrip('.') + '.' + s).lstrip('.')[:-3], file=sio)
s = Series([t for t in sio.getvalue().split('\n') if t], dtype=object)
if output: print(s)
return s
get_mod_names(output=True)
def get_sys_path_mods(output=False):
mods = { 'python' : set(), 'dist' : set(), 'local' : set() }
path = Series([p for p in
[Path(s) for s in
[s for s in sys.path if s]
] if p.exists()
])
print(path)
for p in path:
n = get_mod_names(p)
get_sys_path_mods()
columnize(sorted(listdir(path[6])))
from pandas import DataFrame
DataFrame(os.walk('.'))
try:
src = requests.get("https://docs.python.org/3/py-modindex.html").text
except:
    src = Path("/usr/share/doc/python3.7-doc/html/py-modindex.html").read_text()
class BS(BeautifulSoup):
def __init__(self, s):
super().__init__(s, features="html.parser")
PYTHON_MODULES = None
try:
PYTHON_MODULES = {a.text.split('.')[0] for a in BS(src).findAll("a") if len(a.text) > 1 and not a.text[0].isupper()}
except:
path = site.USER_SITE
PYTHON_MODULES = list(sorted([n for n in listdir(path) if not n.startswith(PERIOD)], key=str.lower))
PACKAGE_MODULES = sorted([re.sub(".py$", "", n) for n in listdir(site.USER_SITE) if not re.search(".*-info", n)])
help(os.walk)
def get_imports(path=Path.cwd(), recursive=True):
""" Return a `set` containing the names of the modules imported by `path`.
"""
if type(path) is str: path = Path(path)
    # materialized as a list so the names can be iterated more than once
    FILES = [Path(s).stem for s in glob(str(path / "*.py"), recursive=recursive)]
    pp(FILES)
    LOCAL_MODULES = sorted([n for n in FILES if not n.startswith('.')], key=str.lower)
    # print(f'{LOCAL_MODULES=}')
results = [set(), set(), set()]
if type(path) is str:
path = Path(path)
if path.is_dir():
for f in glob(str(path / "*.py")):
result = get_imports(f)
results = [r.union(result[i]) for i, r in enumerate(results)]
else:
result = set()
lines = path.read_text().split('\n')
        regex = re.compile(r"\s*import (\w*)|\s*from (\w*)")
for s in lines:
m = regex.match(s)
if m:
i = 1
while not m.group(i):
i += 1
assert i < 4
word = m.group(i)
if word:
m2 = re.search(r'(\w*)\.\w*', word)
if m2:
word = m2.group(1)
result.add(word)
for r in result:
if r in PYTHON_MODULES:
results[0].add(r)
elif r in LOCAL_MODULES:
results[2].add(r)
else:
# if not r == "py"
results[1].add(r)
results = [sorted(list(r)) for r in results]
return list(map(lambda s: list(s), results))
def print_imports(path=Path.cwd()):
results = get_imports(path)
if len(results[0]):
print("Python Modules:")
print()
columnize(results[0])
print()
if len(results[1]):
print("Packages:")
print()
columnize(results[1])
print()
if len(results[2]):
print("Local Modules:")
print()
columnize(results[2])
print()
# print_imports('../hw')
path = BASEDIR
path.name
str(path)
!pwd
def ignore(name:str):
"""Ignore directories that start with '_' and files that start with '.' or '_'."""
# print(name)
IGNORE = {'.', '_', 'tests'}
components = name.split(os.sep)
# pp(components)
if len(components) > 1:
for s in name.split(os.sep)[:-1]:
for i in IGNORE:
if s.startswith(i): return True
p = Path(name)
if p.is_dir() and p.name.startswith('_'): return True
else:
for i in IGNORE:
if p.name.startswith(i) and not p.stem in {'__init__', '__main__'}:
return True
return False
df = DataFrame([f for f in os.walk(path, followlinks=True) if not ignore(f[0])], columns=['dir', 'subdirs', 'files'])
df
type(df['files'])
help(os.walk)
public(Series)
L = list()
for i, l in enumerate(df['files']):
L.extend([Path(df['dir'][i])/s for s in l])
L = [p for p in L if not ignore(str(p)) and p.suffix in {'.py', '.ipynb'}]
notebooks = Series(list(filter(lambda p: p.name.endswith('.ipynb'), L)), dtype=object).unique()
sources = Series(list(filter(lambda p: p.name.endswith('.py'), L)), dtype=object).unique()
pp(notebooks)
with notebooks[0].open() as f:
j = json.load(f)
pat = re.compile(r'^(from|import)')
lines = list()
for c in j['cells']:
lines.extend([l.rstrip('\n') for l in c['source'] if pat.match(l)])
lines
pat = re.compile(r'^(from|import)')
lines = list()
for nb in notebooks:
with nb.open() as f:
j = json.load(f)
for c in j['cells']:
lines.extend([l.rstrip('\n') for l in c['source'] if pat.match(l)])
lines
lines.extend(grep(*sources, pattern=pat, quiet=True))
print(len(lines))
s = set(lines)
print(len(s))
modules = [line.split()[1] for line in s]
modules
public(s)
s.intersection(PYTHON_MODULES)
pymods = set(modules).intersection(PYTHON_MODULES)
columnize(sorted(list(pymods), key=str.lower))
pkgmods = set(modules).difference(PYTHON_MODULES).difference(srcmods)
columnize(sorted(list(pkgmods), key=str.lower))
sources
src_names = set([p.name.split('.')[0] for p in sources])
srcmods = src_names.intersection(modules)
s
output = list()
for mods in [pymods, pkgmods, srcmods]:
section = list()
for line in s:
word = line.split()[1]
if word in mods:
section.append(line)
# pp(section)
# print()
output.extend(sorted(section, key=lambda s: s.split()[1].lower()))
output.append('')
output
print(Path('hw/imports.py').read_text())
Path('hw/imports.py').write_text('\n'.join(output))
public(DataFrame)
df.to_string()
print(df.to_string())
###Output
0 1 2 3 4 5
0 from glob import glob None None
1 from glob import iglob as glob
2 import glob None None None None
|
Time sequence Python/[Everyday Self Study] Time sequence Python 02_2021.04.27.tue.ipynb | ###Markdown
Chapter 2. Introducing Variables 01. What is a variable?
###Code
x = 100
print(x)
###Output
100
###Markdown
02. Variables store numbers.
###Code
x = 100
x = 200
print(x)
x = 100
y = 200
sum = x+y
print(sum)
###Output
300
###Markdown
03. '=' is special - it means assignment
###Code
x = 100
y = 200
x = y
print('x=',x)
print('y=',y)
score = 10
score = score + 1
print(score)
###Output
11
###Markdown
04. Variables store strings
###Code
name1 = '홍길동'
name2 = '성춘향'
address = '서울시 종로구 1번지'
print(name1)
print(name2)
print(address)
print('23'+'45')
print(23+56)
###Output
2345
79
###Markdown
05. Naming variables - Rules:\1) use letters, digits, and the underscore (_)\2) no spaces in the middle\3) must not start with a digit\4) letters are case-sensitive 06. Printing several values at once
###Code
x = 100
y = 200
sum = x+y
print('The sum of', x, 'and', y, 'is', sum)
###Output
The sum of 100 and 200 is 300
###Markdown
07. Reading string input
###Code
a = input()
a
a = input('Enter: ')
a
name = input('Enter your name: ')
print(name + ', hello!')
print("Welcome to Python.")
###Output
Enter your name: 홍길동
홍길동, hello!
Welcome to Python.
###Markdown
08. Reading numeric input
###Code
x = int(input('Enter the first integer: '))
y = int(input('Enter the second integer: '))
sum = x + y
print('The sum of', x, 'and', y, 'is', sum)
x = int(input('Enter the first integer: '))
y = int(input('Enter the second integer: '))
print(x,"+", y, "=", x + y)
print(x,"-", y, "=", x - y)
print(x,"*", y, "=", x * y)
print(x,"/", y, "=", x / y)
###Output
Enter the first integer: 300
Enter the second integer: 400
300 + 400 = 700
300 - 400 = -100
300 * 400 = 120000
300 / 400 = 0.75
###Markdown
Lab. Where are variables useful?
###Code
print("반지름이 10인 원의 넓이:", 10 * 10 * 3.14)
print("반지름이 20인 원의 넓이:", 20 * 20 * 3.14)
print("반지름이 30인 원의 넓이:", 30 * 30 * 3.14)
print("반지름이 40인 원의 넓이:", 40 * 40 * 3.14)
print("반지름이 50인 원의 넓이:", 50 * 50 * 3.14)
###Output
Area of a circle with radius 10: 314.0
Area of a circle with radius 20: 1256.0
Area of a circle with radius 30: 2826.0
Area of a circle with radius 40: 5024.0
Area of a circle with radius 50: 7850.0
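###Markdown
The same table produced with a variable doing the work - a sketch of the point this lab is making:
###Code
for radius in (10, 20, 30, 40, 50):
    print("Area of a circle with radius", radius, ":", radius * radius * 3.14)
###Output
_____no_output_____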
###Markdown
Lab. Drawing a circle of my choice
###Code
# import turtle
# t = turtle.Turtle()
# t.shape("turtle")
# radius = int(input("Enter the circle's radius: "))
# color = input("Enter the circle's color: ")
# t.color(color)
# t.begin_fill()
# t.circle(radius)
# t.end_fill()
###Output
_____no_output_____
###Markdown
Lab. How far away did the thunder and lightning occur?
###Code
s = int(input("Enter the measured time (seconds): "))
d = 340 * s
print("Distance from your position to the lightning strike =", d, "m")
###Output
Enter the measured time (seconds): 3
Distance from your position to the lightning strike = 1020 m
|
doc/source/ray-air/examples/torch_image_example.ipynb | ###Markdown
Training a Torch Classifier This tutorial demonstrates how to train an image classifier using the [Ray AI Runtime](air) (AIR). You should be familiar with [PyTorch](https://pytorch.org/) before starting the tutorial. If you need a refresher, read PyTorch's [training a classifier](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html) tutorial. Before you begin* Install the [Ray AI Runtime](air). You'll need Ray 1.13 or later to run this example.
###Code
!pip install 'ray[air]'
###Output
_____no_output_____
###Markdown
* Install `requests`, `torch`, and `torchvision`
###Code
!pip install requests torch torchvision
###Output
_____no_output_____
###Markdown
Load and normalize CIFAR-10 We'll train our classifier on a popular image dataset called [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html). First, let's load CIFAR-10 into a Ray Dataset.
###Code
import ray
from ray.data.datasource import SimpleTorchDatasource
import torchvision
import torchvision.transforms as transforms
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
def train_dataset_factory():
return torchvision.datasets.CIFAR10(root="./data", download=True, train=True, transform=transform)
def test_dataset_factory():
return torchvision.datasets.CIFAR10(root="./data", download=True, train=False, transform=transform)
train_dataset: ray.data.Dataset = ray.data.read_datasource(SimpleTorchDatasource(), dataset_factory=train_dataset_factory)
test_dataset: ray.data.Dataset = ray.data.read_datasource(SimpleTorchDatasource(), dataset_factory=test_dataset_factory)
train_dataset
###Output
_____no_output_____
###Markdown
Note that {py:class}`SimpleTorchDatasource ` loads all data into memory, so you shouldn't use it with larger datasets. Next, let's represent our data using pandas dataframes instead of tuples. This lets us call methods like {py:meth}`Dataset.to_torch ` later in the tutorial.
###Code
from typing import Tuple
import pandas as pd
from ray.data.extensions import TensorArray
import torch
def convert_batch_to_pandas(batch: Tuple[torch.Tensor, int]) -> pd.DataFrame:
images = [TensorArray(image.numpy()) for image, _ in batch]
labels = [label for _, label in batch]
df = pd.DataFrame({"image": images, "label": labels})
return df
train_dataset = train_dataset.map_batches(convert_batch_to_pandas)
test_dataset = test_dataset.map_batches(convert_batch_to_pandas)
train_dataset
###Output
_____no_output_____
###Markdown
Train a convolutional neural network Now that we've created our datasets, let's define the training logic.
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
###Output
_____no_output_____
###Markdown
We define our training logic in a function called `train_loop_per_worker`. `train_loop_per_worker` contains regular PyTorch code with a few notable exceptions:* We wrap our model with {py:func}`train.torch.prepare_model `.* We call {py:func}`train.get_dataset_shard ` and {py:meth}`Dataset.to_torch ` to convert a subset of our training data to a Torch dataset.* We save model state using {py:func}`train.save_checkpoint `.
###Code
from ray import train
import torch.optim as optim
def train_loop_per_worker(config):
model = train.torch.prepare_model(Net())
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
train_dataset_shard: torch.utils.data.Dataset = train.get_dataset_shard("train").to_torch(
feature_columns=["image"],
label_column="label",
batch_size=config["batch_size"],
unsqueeze_feature_tensors=False,
unsqueeze_label_tensor=False
)
for epoch in range(2):
running_loss = 0.0
for i, data in enumerate(train_dataset_shard):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}")
running_loss = 0.0
train.save_checkpoint(model=model.module.state_dict())
###Output
_____no_output_____
###Markdown
Finally, we can train our model. This should take a few minutes to run.
###Code
from ray.train.torch import TorchTrainer
trainer = TorchTrainer(
train_loop_per_worker=train_loop_per_worker,
train_loop_config={"batch_size": 2},
datasets={"train": train_dataset},
scaling_config={"num_workers": 2}
)
result = trainer.fit()
latest_checkpoint = result.checkpoint
###Output
_____no_output_____
###Markdown
To scale your training script, create a [Ray Cluster](deployment-guide) and increase the number of workers. If your cluster contains GPUs, add `"use_gpu": True` to your scaling config.```{code-block} pythonscaling_config={"num_workers": 8, "use_gpu": True}``` Test the network on the test data Let's see how our model performs. To classify images in the test dataset, we'll need to create a {py:class}`Predictor `.{py:class}`Predictors ` load data from checkpoints and efficiently perform inference. In contrast to {py:class}`TorchPredictor `, which performs inference on a single batch, {py:class}`BatchPredictor ` performs inference on an entire dataset. Because we want to classify all of the images in the test dataset, we'll use a {py:class}`BatchPredictor `.
###Code
from ray.train.torch import TorchPredictor
from ray.train.batch_predictor import BatchPredictor
batch_predictor = BatchPredictor.from_checkpoint(
checkpoint=latest_checkpoint,
predictor_cls=TorchPredictor,
model=Net(),
)
outputs: ray.data.Dataset = batch_predictor.predict(
data=test_dataset, feature_columns=["image"], unsqueeze=False
)
###Output
[2m[36m(BlockWorker pid=13962)[0m /GitHub/ray/python/ray/ml/utils/torch_utils.py:64: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at /Users/distiller/project/pytorch/torch/csrc/utils/tensor_numpy.cpp:178.)
[2m[36m(BlockWorker pid=13962)[0m return torch.as_tensor(vals, dtype=dtype)
###Markdown
Our model outputs a list of energies for each class. To classify an image, we choose the class that has the highest energy.
###Code
import numpy as np
def convert_logits_to_classes(df):
best_class = df["predictions"].map(lambda x: x.argmax())
df["prediction"] = best_class
return df[["prediction"]]
predictions = outputs.map_batches(
convert_logits_to_classes, batch_format="pandas"
)
predictions.show(1)
###Output
{'prediction': 3}
###Markdown
Now that we've classified all of the images, let's figure out which images were classified correctly. The ``predictions`` dataset contains predicted labels and the ``test_dataset`` contains the true labels. To determine whether an image was classified correctly, we join the two datasets and check if the predicted labels are the same as the actual labels.
###Code
def calculate_prediction_scores(df):
df["correct"] = df["prediction"] == df["label"]
return df[["prediction", "label", "correct"]]
scores = test_dataset.zip(predictions).map_batches(calculate_prediction_scores)
scores.show(1)
###Output
{'prediction': 3, 'label': 3, 'correct': True}
###Markdown
To compute our test accuracy, we'll count how many images the model classified correctly and divide that number by the total number of test images.
###Code
scores.sum(on="correct") / scores.count()
###Output
_____no_output_____
###Markdown
Deploy the network and make a prediction Our model seems to perform decently, so let's deploy the model to an endpoint. This'll allow us to make predictions over the Internet.
###Code
from ray import serve
from ray.serve.model_wrappers import ModelWrapperDeployment
serve.start(detached=True)
deployment = ModelWrapperDeployment.options(name="my-deployment")
deployment.deploy(TorchPredictor, latest_checkpoint, batching_params=False, model=Net())
###Output
[2m[36m(ServeController pid=13967)[0m INFO 2022-05-26 14:52:14,630 controller 13967 checkpoint_path.py:17 - Using RayInternalKVStore for controller checkpoint and recovery.
[2m[36m(ServeController pid=13967)[0m INFO 2022-05-26 14:52:14,633 controller 13967 http_state.py:112 - Starting HTTP proxy with name 'SERVE_CONTROLLER_ACTOR:SERVE_PROXY_ACTOR-node:127.0.0.1-0' on node 'node:127.0.0.1-0' listening on '127.0.0.1:8000'
[2m[36m(HTTPProxyActor pid=13969)[0m INFO: Started server process [13969]
[2m[36m(ServeController pid=13967)[0m INFO 2022-05-26 14:52:16,241 controller 13967 deployment_state.py:1218 - Adding 1 replicas to deployment 'my-deployment'.
###Markdown
Let's classify a test image.
###Code
batch = test_dataset.take(1)
array = np.expand_dims(np.array(batch[0]["image"]), axis=0)
array.shape
###Output
_____no_output_____
###Markdown
You can perform inference against a deployed model by posting a dictionary with an `"array"` key. To learn more about the default input schema, read the {py:class}`NdArray ` documentation.
###Code
import requests
payload = {"array": array.tolist()}
response = requests.post(deployment.url, json=payload)
response.json()
###Output
_____no_output_____
array.shape
###Output
_____no_output_____
###Markdown
You can perform inference against a deployed model by posting a dictionary with an `"array"` key. To learn more about the default input schema, read the {py:class}`NdArray ` documentation.
###Code
import requests
payload = {"array": array.tolist()}
response = requests.post(deployment.url, json=payload)
response.json()
###Output
_____no_output_____
###Markdown
Training a Torch Classifier

This tutorial demonstrates how to train an image classifier using the [Ray AI Runtime](air) (AIR). You should be familiar with [PyTorch](https://pytorch.org/) before starting the tutorial. If you need a refresher, read PyTorch's [training a classifier](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html) tutorial.

Before you begin

* Install the [Ray AI Runtime](air). You'll need Ray 1.13 or later to run this example.
###Code
!pip install 'ray[air]'
###Output
_____no_output_____
###Markdown
* Install `requests`, `torch`, and `torchvision`
###Code
!pip install requests torch torchvision
###Output
_____no_output_____
###Markdown
Load and normalize CIFAR-10

We'll train our classifier on a popular image dataset called [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html). First, let's load CIFAR-10 into a Ray Dataset.
###Code
import ray
from ray.data.datasource import SimpleTorchDatasource
import torchvision
import torchvision.transforms as transforms
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
def train_dataset_factory():
return torchvision.datasets.CIFAR10(root="./data", download=True, train=True, transform=transform)
def test_dataset_factory():
return torchvision.datasets.CIFAR10(root="./data", download=True, train=False, transform=transform)
train_dataset: ray.data.Dataset = ray.data.read_datasource(SimpleTorchDatasource(), dataset_factory=train_dataset_factory)
test_dataset: ray.data.Dataset = ray.data.read_datasource(SimpleTorchDatasource(), dataset_factory=test_dataset_factory)
train_dataset
###Output
_____no_output_____
###Markdown
Note that {py:class}`SimpleTorchDatasource ` loads all data into memory, so you shouldn't use it with larger datasets.

Next, let's represent our data using pandas dataframes instead of tuples. This lets us call methods like {py:meth}`Dataset.to_torch ` later in the tutorial.
###Code
from typing import Tuple
import pandas as pd
from ray.data.extensions import TensorArray
import torch
def convert_batch_to_pandas(batch: Tuple[torch.Tensor, int]) -> pd.DataFrame:
images = [TensorArray(image.numpy()) for image, _ in batch]
labels = [label for _, label in batch]
df = pd.DataFrame({"image": images, "label": labels})
return df
train_dataset = train_dataset.map_batches(convert_batch_to_pandas)
test_dataset = test_dataset.map_batches(convert_batch_to_pandas)
train_dataset
###Output
_____no_output_____
###Markdown
Train a convolutional neural networkNow that we've created our datasets, let's define the training logic.
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
###Output
_____no_output_____
###Markdown
We define our training logic in a function called `train_loop_per_worker`.

`train_loop_per_worker` contains regular PyTorch code with a few notable exceptions:

* We wrap our model with {py:func}`train.torch.prepare_model `.
* We call {py:func}`train.get_dataset_shard ` and {py:meth}`Dataset.to_torch ` to convert a subset of our training data to a Torch dataset.
* We save model state using {py:func}`train.save_checkpoint `.
###Code
from ray import train
import torch.optim as optim
def train_loop_per_worker(config):
model = train.torch.prepare_model(Net())
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
train_dataset_shard: torch.utils.data.Dataset = train.get_dataset_shard("train").to_torch(
feature_columns=["image"],
label_column="label",
batch_size=config["batch_size"],
unsqueeze_feature_tensors=False,
unsqueeze_label_tensor=False
)
for epoch in range(2):
running_loss = 0.0
for i, data in enumerate(train_dataset_shard):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}")
running_loss = 0.0
train.save_checkpoint(model=model.module.state_dict())
###Output
_____no_output_____
###Markdown
Finally, we can train our model. This should take a few minutes to run.
###Code
from ray.ml.train.integrations.torch import TorchTrainer
trainer = TorchTrainer(
train_loop_per_worker=train_loop_per_worker,
train_loop_config={"batch_size": 2},
datasets={"train": train_dataset},
scaling_config={"num_workers": 2}
)
result = trainer.fit()
latest_checkpoint = result.checkpoint
###Output
_____no_output_____
###Markdown
To scale your training script, create a [Ray Cluster](deployment-guide) and increase the number of workers. If your cluster contains GPUs, add `"use_gpu": True` to your scaling config.

```{code-block} python
scaling_config={"num_workers": 8, "use_gpu": True}
```

Test the network on the test data

Let's see how our model performs.

To classify images in the test dataset, we'll need to create a {py:class}`Predictor `. {py:class}`Predictors ` load data from checkpoints and efficiently perform inference. In contrast to {py:class}`TorchPredictor `, which performs inference on a single batch, {py:class}`BatchPredictor ` performs inference on an entire dataset. Because we want to classify all of the images in the test dataset, we'll use a {py:class}`BatchPredictor `.
###Code
from ray.ml.predictors.integrations.torch import TorchPredictor
from ray.ml.batch_predictor import BatchPredictor
batch_predictor = BatchPredictor.from_checkpoint(
checkpoint=latest_checkpoint,
predictor_cls=TorchPredictor,
model=Net(),
)
outputs: ray.data.Dataset = batch_predictor.predict(
data=test_dataset, feature_columns=["image"], unsqueeze=False
)
###Output
[2m[36m(BlockWorker pid=13962)[0m /Users/balaji/GitHub/ray/python/ray/ml/utils/torch_utils.py:64: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at /Users/distiller/project/pytorch/torch/csrc/utils/tensor_numpy.cpp:178.)
[2m[36m(BlockWorker pid=13962)[0m return torch.as_tensor(vals, dtype=dtype)
###Markdown
Our model outputs a list of energies for each class. To classify an image, we choose the class that has the highest energy.
###Code
import numpy as np
def convert_logits_to_classes(df):
best_class = df["predictions"].map(lambda x: x.argmax())
df["prediction"] = best_class
return df[["prediction"]]
predictions = outputs.map_batches(
convert_logits_to_classes, batch_format="pandas"
)
predictions.show(1)
###Output
{'prediction': 3}
###Markdown
Now that we've classified all of the images, let's figure out which images were classified correctly. The ``predictions`` dataset contains predicted labels and the ``test_dataset`` contains the true labels. To determine whether an image was classified correctly, we join the two datasets and check if the predicted labels are the same as the actual labels.
###Code
def calculate_prediction_scores(df):
df["correct"] = df["prediction"] == df["label"]
return df[["prediction", "label", "correct"]]
scores = test_dataset.zip(predictions).map_batches(calculate_prediction_scores)
scores.show(1)
###Output
{'prediction': 3, 'label': 3, 'correct': True}
###Markdown
To compute our test accuracy, we'll count how many images the model classified correctly and divide that number by the total number of test images.
###Code
scores.sum(on="correct") / scores.count()
###Output
_____no_output_____
###Markdown
Deploy the network and make a prediction

Our model seems to perform decently, so let's deploy the model to an endpoint. This'll allow us to make predictions over the Internet.
###Code
from ray import serve
from ray.serve.model_wrappers import ModelWrapperDeployment
serve.start(detached=True)
deployment = ModelWrapperDeployment.options(name="my-deployment")
deployment.deploy(TorchPredictor, latest_checkpoint, batching_params=False, model=Net())
###Output
[2m[36m(ServeController pid=13967)[0m INFO 2022-05-26 14:52:14,630 controller 13967 checkpoint_path.py:17 - Using RayInternalKVStore for controller checkpoint and recovery.
[2m[36m(ServeController pid=13967)[0m INFO 2022-05-26 14:52:14,633 controller 13967 http_state.py:112 - Starting HTTP proxy with name 'SERVE_CONTROLLER_ACTOR:SERVE_PROXY_ACTOR-node:127.0.0.1-0' on node 'node:127.0.0.1-0' listening on '127.0.0.1:8000'
[2m[36m(HTTPProxyActor pid=13969)[0m INFO: Started server process [13969]
[2m[36m(ServeController pid=13967)[0m INFO 2022-05-26 14:52:16,241 controller 13967 deployment_state.py:1218 - Adding 1 replicas to deployment 'my-deployment'.
###Markdown
Let's classify a test image.
###Code
batch = test_dataset.take(1)
array = np.expand_dims(np.array(batch[0]["image"]), axis=0)
array.shape
###Output
_____no_output_____
###Markdown
You can perform inference against a deployed model by posting a dictionary with an `"array"` key. To learn more about the default input schema, read the {py:class}`NdArray ` documentation.
###Code
import requests
payload = {"array": array.tolist()}
response = requests.post(deployment.url, json=payload)
response.json()
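# The JSON schema of this response depends on the Serve HTTP adapter in use, so
# treat the following as an assumption: if the body holds the raw class energies
# produced by Net, the predicted label can be recovered with an argmax, mirroring
# convert_logits_to_classes above:
# predicted_class = int(np.argmax(response.json()))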
###Output
_____no_output_____ |
docs/tutorials/intro.ipynb | ###Markdown
(intro)= An Introduction to tinygp

This tutorial provides a brief introduction to how Gaussian Processes (GPs) are implemented in `tinygp`. We're not going to provide much of an introduction to GPs themselves, because there are already a lot of excellent resources for that, including [this text book](http://www.gaussianprocess.org/gpml/chapters/), [this blog post](https://distill.pub/2019/visual-exploration-gaussian-processes/), and many others that I'm sure you can find by Googling. Before we get started, it's pretty much always a good idea to [enable double precision](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision) before doing anything with `tinygp` and GPs in `jax`, so that we end up with fewer numerical precision issues:
###Code
import jax
jax.config.update("jax_enable_x64", True)
###Output
_____no_output_____
###Markdown
Kernel building

In `tinygp`, we primarily construct GP models by specifying a "kernel" function defined using the building blocks in the {ref}`api-kernels`. For example, we can define an "exponential squared" or "radial basis function" kernel using:
###Code
from tinygp import kernels
kernel = kernels.ExpSquared(scale=1.5)
###Output
_____no_output_____
###Markdown
And we can plot its value (don't worry too much about the syntax here):
###Code
import numpy as np
import matplotlib.pyplot as plt
def plot_kernel(kernel, **kwargs):
dx = np.linspace(0, 5, 100)
plt.plot(dx, kernel(dx, dx[:1]), **kwargs)
plt.xlabel("dx")
plt.ylabel("k(dx)")
plot_kernel(kernel)
###Output
_____no_output_____
###Markdown
This kernel on its own is not terribly expressive, so we'll usually end up adding and multiplying kernels to build the function we want. For example, we can:

- scale our kernel by a scalar,
- add multiple different kernels, or
- multiply kernels together

as follows:
###Code
plot_kernel(kernel, label="original", ls="dashed")
kernel_scaled = 4.5 * kernels.ExpSquared(scale=1.5)
plot_kernel(kernel_scaled, label="scaled")
kernel_sum = kernels.ExpSquared(scale=1.5) + 2 * kernels.Matern32(scale=2.5)
plot_kernel(kernel_sum, label="sum")
kernel_prod = 2 * kernels.ExpSquared(scale=1.5) * kernels.Cosine(scale=2.5)
plot_kernel(kernel_prod, label="product")
_ = plt.legend()
###Output
_____no_output_____
###Markdown
For a lot of use cases, these operations will be sufficient to build the models that you need, but if not, check out the following tutorials for some more expressive examples: {ref}`kernels`, {ref}`transforms`, {ref}`geometry`, and {ref}`derivative`.

Sampling

Once you have a kernel in hand, you can pass it to a {class}`tinygp.GaussianProcess` to handle most of the computations you need. The {class}`tinygp.GaussianProcess` will also need to know the input coordinates of your data `X` (let's just make some up for now) and it takes a little parameter `diag`, which specifies extra variance to add on the diagonal. When modeling real data, this can often be thought of as per-observation measurement uncertainty, but it may not always be obvious what to put there. That being said, you'll probably find that if you don't use the `diag` parameter you'll get a lot of `nan`s in your results, so it's generally good to at least provide some small value for `diag`.
###Code
from tinygp import GaussianProcess
# Let's make up some input coordinates (sorted for plotting purposes)
X = np.sort(np.random.default_rng(1).uniform(0, 10, 100))
gp = GaussianProcess(kernel, X)
###Output
_____no_output_____
###Markdown
This `gp` object now specifies a multivariate normal distribution over data points observed at `X`. It's sometimes useful to generate samples from this distribution to see what our prior looks like. We can do that using the {func}`GaussianProcess.sample` function, and this will be the first time we're going to need to do anything `jax`-specific (because of [how random numbers work in `jax`](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#random-numbers)):
###Code
y = gp.sample(jax.random.PRNGKey(4), shape=(5,))
plt.plot(X, y.T, color="k", lw=0.5)
plt.xlabel("x")
plt.ylabel("sampled observations")
_ = plt.title("exponential squared kernel")
###Output
_____no_output_____
###Markdown
We can also generate samples for different kernel functions:
###Code
# Here we're using the product of kernels defined above
kernel_prod = 2 * kernels.ExpSquared(scale=1.5) * kernels.Cosine(scale=2.5)
gp = GaussianProcess(kernel_prod, X, diag=1e-5)
y = gp.sample(jax.random.PRNGKey(4), shape=(5,))
plt.plot(X, y.T, color="k", lw=0.5)
plt.xlabel("x")
plt.ylabel("sampled observations")
_ = plt.title("product of kernels")
###Output
_____no_output_____
###Markdown
It is quite common in the GP literature to set the mean function for our process to zero, but that isn't always what you want. Instead, you can set the mean to a different constant, or to a function:
###Code
# A GP with a non-zero constant mean
gp = GaussianProcess(kernel, X, diag=1e-5, mean=2.0)
y_const = gp.sample(jax.random.PRNGKey(4), shape=(5,))
# And a GP with a general mean function
def mean_function(x):
return 5 * jax.numpy.sin(x)
gp = GaussianProcess(kernel, X, diag=1e-5, mean=mean_function)
y_func = gp.sample(jax.random.PRNGKey(4), shape=(5,))
# Plotting these samples
_, axes = plt.subplots(2, 1, sharex=True)
ax = axes[0]
ax.plot(X, y_const.T, color="k", lw=0.5)
ax.axhline(2.0)
ax.set_ylabel("constant mean")
ax = axes[1]
ax.plot(X, y_func.T, color="k", lw=0.5)
ax.plot(X, jax.vmap(mean_function)(X), label="mean")
ax.legend()
ax.set_xlabel("x")
_ = ax.set_ylabel("mean function")
###Output
_____no_output_____
###Markdown
Conditioning & marginalization

When it comes time to fit data using a GP model, the key operations that `tinygp` provides are _conditioning_ and _marginalization_. For example, you may want to fit for the parameters of your kernel model (the length scale and amplitude, for example), and a good objective to use for that process is the marginal likelihood of the process evaluated for the observed data. In `tinygp`, this is accessed via the {func}`tinygp.GaussianProcess.log_probability` method, which takes the observed data `y` as input. (_Aside:_ The nomenclature for this method is a little tricky to get right, and we've settled on `log_probability` in `tinygp` since it is the multivariate normal probability density, but it's important to remember that it is a function of the data, making it a "sampling distribution" or "likelihood".)

We won't actually go into details about how to use this method for fitting here (check out pretty much any of the other tutorials for examples!), but to compute the log probability for a given dataset and kernel, we would do something like the following:
###Code
# Simulate a made up dataset, as an example
random = np.random.default_rng(1)
X = np.sort(random.uniform(0, 10, 10))
y = np.sin(X) + 1e-4 * random.normal(size=X.shape)
# Compute the log probability
kernel = 0.5 * kernels.ExpSquared(scale=1.0)
gp = GaussianProcess(kernel, X, diag=1e-4)
print(gp.log_probability(y))
###Output
_____no_output_____
###Markdown
But we do want to go into more details about how `tinygp` implements conditioning because it might be a little counterintuitive at first (and it's also different in `v0.2` of `tinygp`). To condition a GP on observed data, we use the {func}`tinygp.GaussianProcess.condition` method, and that produces a named tuple with two elements as described in {class}`tinygp.gp.ConditionResult`. The first element `log_probability` is the same log probability as we calculated above:
###Code
cond = gp.condition(y)
print(cond.log_probability)
###Output
_____no_output_____
###Markdown
Then the second element `gp` is a new {class}`tinygp.GaussianProcess` describing the distribution at some test points (by default, the test points are the same as our inputs). This conditioned GP operates just like our `gp` above, but its `kernel` and `mean_function` have these strange types:
###Code
type(cond.gp.kernel), type(cond.gp.mean_function)
###Output
_____no_output_____
###Markdown
This is cool because that means that you can use this conditioned {class}`tinygp.GaussianProcess` to do all the things you would usually do with a GP (e.g. sample from it, condition it further, etc.). It is common to make plots like the following using these conditioned GPs (note that we're now using different test points):
###Code
X_test = np.linspace(0, 10, 100)
_, cond_gp = gp.condition(y, X_test)
# The GP object keeps track of its mean and variance, which we can use for
# plotting confidence intervals
mu = cond_gp.mean
std = np.sqrt(cond_gp.variance)
plt.plot(X_test, mu, "C1", label="mean")
plt.plot(X_test, mu + std, "--C1", label="1-sigma region")
plt.plot(X_test, mu - std, "--C1")
# We can also plot samples from the conditional
y_samp = cond_gp.sample(jax.random.PRNGKey(1), shape=(12,))
plt.plot(X_test, y_samp[0], "C0", lw=0.5, alpha=0.5, label="samples")
plt.plot(X_test, y_samp[1:].T, "C0", lw=0.5, alpha=0.5)
plt.plot(X, y, ".k", label="data")
plt.legend(fontsize=10)
plt.xlim(X_test.min(), X_test.max())
plt.xlabel("x")
_ = plt.ylabel("y")
###Output
_____no_output_____
###Markdown
Tips

Given the information we've covered so far, you may have just about everything you need to go on to the other tutorials and start using `tinygp` for real. But there were a few last things that are worth mentioning first.

First, since `tinygp` is built on top of `jax`, it can be very useful to spend some time learning about `jax`, and in particular the [How to Think in JAX](https://jax.readthedocs.io/en/latest/notebooks/thinking_in_jax.html) tutorial is a great place to start. One way that this plays out in `tinygp` is that all the operations described in this tutorial are designed to be `jit` compiled, rather than executed directly like we've done here. Specifically, a very common pattern that you'll see is a functional model setup like the following:
###Code
import jax.numpy as jnp
def build_gp(params):
kernel = jnp.exp(params["log_amp"]) * kernels.ExpSquared(
jnp.exp(params["log_scale"])
)
return GaussianProcess(kernel, X, diag=jnp.exp(params["log_diag"]))
@jax.jit
def loss(params):
gp = build_gp(params)
return -gp.log_probability(y)
params = {
"log_amp": -0.1,
"log_scale": 0.0,
"log_diag": -1.0,
}
loss(params)
###Output
_____no_output_____
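###Markdown
With `loss` defined this way, fitting the hyperparameters becomes a standard numerical optimization problem. The following is a minimal sketch of that step, assuming the optional `jaxopt` package is available (it is not used anywhere above); any other `jax`-aware optimizer would work equally well.
```python
import jaxopt

# ScipyMinimize wraps scipy.optimize.minimize and uses jax to differentiate
# `loss` with respect to the (pytree-structured) parameter dictionary.
solver = jaxopt.ScipyMinimize(fun=loss)
soln = solver.run(params)
print(soln.params)  # optimized hyperparameters, same dict structure as `params`
```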
###Markdown
Intro
###Code
import takco
import requests
from IPython.display import HTML, display
html = requests.get('http://en.wikipedia.org/wiki/Mexico').text
for table in takco.TableSet.extract(source = [{'html': html}]):
display(HTML(takco.extract.htmltables.tableobj_to_html(table)))
from IPython.display import Markdown, display
from takco.link import MediaWikiAPI
uri = MediaWikiAPI().lookup_wikititle('Morelia')
Markdown(f" {uri} ")
###Output
_____no_output_____
###Markdown
Introduction

Yangify is a framework that allows you to easily write code that can map structured and non-structured data into data modelled using YANG models. Yangify can also do the opposite operation and convert data modelled with YANG models into other structured or non-structured data. This allows you to easily write code that can parse native output/data/configuration from network devices and map it into YANG models and vice-versa.

In addition, it can provide the necessary commands to merge and replace a running configuration using a candidate object. This can prove useful for smart replace/merge operations in systems that otherwise might not be able to do so. Note that Yangify doesn't provide the code to do that; Yangify only provides the framework. However, it provides a few reference implementations for documentation, demonstrational and testing purposes.

For an actual implementation of Parsers/Translators using Yangify you can refer to [ntc-rosetta](https://github.com/networktocode/ntc-rosetta).

The Datamodel

Before we even start we need a datamodel. Under the hood, Yangify uses [yangson](https://yangson.labs.nic.cz/). Yangson uses [RFC 7895](https://datatracker.ietf.org/doc/rfc7895/) to define the models to use, so we are going to need a file describing our library and a list of paths where to find the modules being implemented/imported by the library:
###Code
from yangson.datamodel import DataModel
dm = DataModel.from_file("yang/yang-library-data.json", ["yang/yang-modules/ietf", "yang/yang-modules/openconfig"])
###Output
_____no_output_____
###Markdown
The datamodel can be reused across many parsers and translators.

Parsing

You will see more details about this if you go to the tutorial, but this is an example of how you could use a `Parser` written with Yangify to parse IOS configuration:
###Code
import tutorial_parser
from yangify import parser
from yangify.parser.text_tree import parse_indented_config
class IOSParser(parser.RootParser):
class Yangify(parser.ParserData):
def init(self) -> None:
self.root_native = parse_indented_config(self.native.splitlines())
self.native = self.root_native
interfaces = tutorial_parser.Interfaces
vlans = tutorial_parser.Vlans
with open("data/ios/config.txt", "r") as f:
config = f.read()
%cat data/ios/config.txt
p = IOSParser(dm, native=config)
result = p.process()
import json
print(json.dumps(result.raw_value(), indent=4))
###Output
{
"openconfig-interfaces:interfaces": {
"interface": [
{
"name": "FastEthernet1",
"config": {
"name": "FastEthernet1",
"type": "iana-if-type:ethernetCsmacd",
"description": "This is Fa1",
"enabled": false
},
"subinterfaces": {
"subinterface": [
{
"index": 1,
"config": {
"index": 1,
"description": "This is Fa1.1"
}
},
{
"index": 2,
"config": {
"index": 2,
"description": "This is Fa1.2"
}
}
]
}
},
{
"name": "FastEthernet3",
"config": {
"name": "FastEthernet3",
"type": "iana-if-type:ethernetCsmacd",
"description": "This is Fa3",
"enabled": true
}
},
{
"name": "FastEthernet4",
"config": {
"name": "FastEthernet4",
"type": "iana-if-type:ethernetCsmacd",
"enabled": false
}
}
]
},
"openconfig-vlan:vlans": {
"vlan": [
{
"vlan-id": 10,
"config": {
"vlan-id": 10,
"name": "prod",
"status": "ACTIVE"
}
},
{
"vlan-id": 20,
"config": {
"vlan-id": 20,
"name": "dev",
"status": "SUSPENDED"
}
}
]
}
}
###Markdown
Translating

You will see more details about this if you go to the tutorial, but this is an example of how you could use a `Translator` written with Yangify to translate models into IOS configuration:
###Code
import tutorial_translator
from yangify import translator
from yangify.translator.config_tree import ConfigTree
class IOSTranslator(translator.RootTranslator):
class Yangify(translator.TranslatorData):
def init(self) -> None:
self.root_result = ConfigTree()
self.result = self.root_result
def post(self) -> None:
self.root_result = self.root_result.to_string()
interfaces = tutorial_translator.Interfaces
vlans = tutorial_translator.Vlans
%cat data/ios/data.json
import json
with open("data/ios/data.json", "r") as f:
data = json.load(f)
p = IOSTranslator(dm, candidate=data)
print(p.process())
###Output
interface FastEthernet1
description This is Fa1
shutdown
exit
!
interface FastEthernet1.1
description This is Fa1.1
exit
!
interface FastEthernet1.2
description This is Fa1.2
exit
!
interface FastEthernet3
description This is Fa3
no shutdown
exit
!
interface FastEthernet4
shutdown
exit
!
vlan 10
name prod
no shutdown
exit
!
vlan 20
name dev
shutdown
exit
!
|
examples/colab/component_examples/multilingual/chinese_ner_pos_and_tokenization.ipynb | ###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/multilingual/chinese_ner_pos_and_tokenization.ipynb)

Detect Named Entities (NER), Part of Speech Tags (POS) and Tokenize in Chinese

Install NLU
###Code
import os
! apt-get update -qq > /dev/null
# Install java
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! pip install nlu pyspark==2.4.7 > /dev/null
import nlu
###Output
_____no_output_____
###Markdown
Tokenize Chinese
###Code
# Tokenize in chinese
import nlu
# pipe = nlu.load('zh.tokenize') This is an alias that gives you the same model
pipe = nlu.load('zh.segment_words')
# Chinese for 'Donald Trump and Angela Merkel dont share many opinions'
zh_data = ['唐纳德特朗普和安吉拉·默克尔没有太多意见']
df = pipe.predict(zh_data, output_level='token')
df
###Output
wordseg_weibo download started this may take some time.
Approximate size to download 1.2 MB
[OK!]
###Markdown
Extract Chinese POS
###Code
# Extract Part of Speech
pipe = nlu.load('zh.pos')
zh_data = ['唐纳德特朗普和安吉拉·默克尔没有太多意见']
df = pipe.predict(zh_data, output_level='document')
df
###Output
pos_ud_gsd download started this may take some time.
Approximate size to download 3.4 MB
[OK!]
wordseg_weibo download started this may take some time.
Approximate size to download 1.2 MB
[OK!]
###Markdown
Extract Chinese NER
###Code
# Extract named chinese entities
pipe = nlu.load('zh.ner')
zh_data = ['唐纳德特朗普和安吉拉·默克尔没有太多意见']
df = pipe.predict(zh_data, output_level='document')
df
###Output
ner_msra_bert_768d download started this may take some time.
Approximate size to download 19.2 MB
[OK!]
wordseg_weibo download started this may take some time.
Approximate size to download 1.2 MB
[OK!]
bert_base_chinese download started this may take some time.
Approximate size to download 367.6 MB
[OK!]
###Markdown
Translate Chinese extracted named entities to English
###Code
# Translate Chinese extracted named entities to English
translate_pipe = nlu.load('zh.translate_to.en')
en_entities = translate_pipe.predict(df.entities.str.join('.').values.tolist())
en_entities
###Output
_____no_output_____ |
introduction-to-data-science-for-educators/introduction-to-data-science-for-educators.ipynb | ###Markdown
 Introduction to Data Science and Big Data For EducatorsDavid Hay ([@misterhay](https://twitter.com/misterhay))Dr. Michael Lamoureux[Callysto.ca](https://callysto.ca) | [@callysto_canada](https://twitter.com/callysto_canada) Introduction to Data Science and Big Data For EducatorsThe ability to critically analyse large sets of data is becoming increasingly important, and there are many applications in education. We will introduce participants to the fundamentals of data science, and look at how you can incorporate data science into your teaching. You will come away with an increased understanding of this topic as well as some practical activities to use in your learning environment. Data ScienceData science involves obtaining and **communicating** information from (usually large) sets of observations.* collecting, cleaning, manipulating, visualizing, synthesizing* describing, diagnosing, predicting, prescribing Why is Data Science Important? What Does Data Science Look Like?e.g. [Gapminder animation](https://www.gapminder.org/tools/$model$markers$bubble$encoding$frame$scale$domain@=1800&=2019;;;;;;;&chart-type=bubbles&url=v1) How Can We Introduce Data Science? Jupyter NotebooksA Jupyter notebook is an online document that can include both **formatted text** and (Python) `code` in different “cells” or parts of the document.These documents run on [Callysto Hub](https://hub.callysto.ca/) as well as [Google Colab](https://colab.research.google.com/), [IBM Watson Studio](https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/notebooks-parent.html), and other places.We'll be using Python code in Jupyter notebooks for data science and computational thinking.Links in this slideshow (and on Callysto.ca) create copies of Jupyter notebooks in your (and your students’) Callysto Hub accounts. This slideshow is also a Jupyter notebook. Visualizing DataVisualizations of data help with analysis and storytelling.* Usually include tables and graphsIn a Jupyter notebook with Python code, a graph can be as easy as:
###Code
import plotly.express as px
px.pie(names=['left-handed', 'right-handed'], values=[3, 21], title='Handedness of People in our Class')
px.scatter(x=[1, 2, 3, 4], y=[1, 4, 9, 16])
labels = ['English','French','Aboriginal Languages','Other']
values = [56.9,21.3,0.6,21.2]
px.bar(x=labels, y=values, title='First Languages Spoken in Canada')
###Output
_____no_output_____
###Markdown
Using Online Data

We can import data from webpages or other files hosted online.

Examples of Data Sources

* Wikipedia
* Gapminder
* Statistics Canada
* Canada Open Data
* Alberta Open Data
* Many cities and municipalities have open data portals
###Code
url = 'https://en.wikipedia.org/wiki/List_of_Alberta_general_elections'
import pandas as pd
df = pd.read_html(url)[1]
df
px.histogram(df, x='Winner', title='Political Parties Elected in Alberta')
###Output
_____no_output_____
###Markdown
CSV Data Online
###Code
from geopy.geocoders import Nominatim
geolocator = Nominatim(user_agent='Callysto Demonstration')
coordinates = geolocator.geocode('Grande Prairie, AB')
temperature_url = 'https://climateknowledgeportal.worldbank.org/api/data/get-download-data/historical/tas/1901-2016/'+str(coordinates.latitude)+'$cckp$'+str(coordinates.longitude)+'/'+str(coordinates.latitude)+'$cckp$'+str(coordinates.longitude)
temperatures = pd.read_csv(temperature_url)
temperatures
px.scatter(temperatures, x=' Statistics', y='Temperature - (Celsius)', color=' Year',
title='Monthy Average Temperatures in Grande Prairie from 1901-2016')
px.line(temperatures, x=' Year', y='Temperature - (Celsius)', color=' Statistics',
title='Monthy Average Temperatures in Grande Prairie from 1901-2016')
px.bar(temperatures, x=' Statistics', y='Temperature - (Celsius)', animation_frame=' Year',
title='Temperatures in Grande Prairie').update_layout(yaxis_range=[-30, 30])
###Output
_____no_output_____
###Markdown
Data Formatting

You may find data in "tidy" or "wide" format.

Tidy (Long) Data

One observation per row.

Name|Assignment|Mark
-|-|-
Marie|Radium Report|88
Marie|Polonium Lab|84
Jane|Primate Report|94
Jane|Institute Project|77
Mae|Endeavour Launch|92
Jennifer|Genetics Project|87

Wide Data

Multiple columns for variables.

Name|Science Lab|Science Report|Spelling Test|Math Worksheet|Discussion Questions
-|-|-|-|-|-
Ryder|80|60|90|70|80
Marshall|60|70|70|80|90
Skye|90|80|90|90|80
Everest|80|90|80|70|90

Data can be converted from one format to another, depending on how it is going to be visualized; a short pandas sketch appears at the end of this section.

Markdown

For formatting text in notebooks, e.g. **bold** and *italics*.

[Markdown Cheatsheet](https://www.ibm.com/support/knowledgecenter/SSHGWL_1.2.3/analyze-data/markd-jupyter.html)

LaTeX

Mathematical and scientific formatting, e.g. $m = \frac{E}{c^2}$

$6CO_2 + 6H_2O \rightarrow C_6H_{12}O_6 + 6O_2$

[LaTeX Cheatsheet](https://davidhamann.de/2017/06/12/latex-cheat-sheet)

Curriculum Notebooks

The [Callysto](https://www.callysto.ca) project has been developing free curriculum-aligned notebooks and other resources.

Some of my Favorite Notebooks

* [Statistics Project](https://github.com/callysto/curriculum-notebooks/tree/master/Mathematics/StatisticsProject)
* [Orphan Wells](https://github.com/callysto/curriculum-notebooks/blob/master/SocialStudies/OrphanWells/orphan-wells.ipynb)
* [Survive the Middle Ages](https://github.com/callysto/curriculum-notebooks/blob/master/SocialStudies/SurviveTheMiddleAges/survive-the-middle-ages.ipynb)
* [Asthma Rates](https://github.com/callysto/curriculum-notebooks/blob/master/Health/AsthmaRates/asthma-rates.ipynb)
* [Climate Graphs](https://github.com/callysto/curriculum-notebooks/blob/master/Science/Climatograph/climatograph.ipynb)
* [Shakespeare and Statistics](https://github.com/callysto/curriculum-notebooks/blob/master/EnglishLanguageArts/ShakespeareStatistics/shakespeare-and-statistics.ipynb)
* [Word Clouds](https://github.com/callysto/curriculum-notebooks/blob/master/EnglishLanguageArts/WordClouds/word-clouds.ipynb)

Data Visualizations and Interesting Problems

[Weekly Data Visualizations](https://www.callysto.ca/weekly-data-visualization) are pre-made, introductory data science lessons. They are a way for students to develop critical thinking and problem solving skills. We start with a question, find an open dataset to answer the question, and then ask students to reflect.

[Interesting Problems](https://www.callysto.ca/interesting-problems/) are notebooks and often video series that demonstrate critical thinking skills and use programming code to solve interesting problems.

Hackathons

Online hackathons, either facilitated or [planned yourself](https://docs.google.com/document/d/1tnHhiE554xAmMRbU9REiJZ0rkJmxtNlkkQVCFfCoowE), enable students and educators to collaborate intensely to explore data and solve problems.


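As a concrete illustration of the tidy/wide conversion mentioned above, here is a minimal pandas sketch; the names are taken from the wide-format table, and `melt`/`pivot` are standard pandas methods.

```python
import pandas as pd

wide = pd.DataFrame({
    "Name": ["Ryder", "Marshall", "Skye", "Everest"],
    "Science Lab": [80, 60, 90, 80],
    "Science Report": [60, 70, 80, 90],
})

# Wide -> tidy: one row per (student, assignment) observation
tidy = wide.melt(id_vars="Name", var_name="Assignment", value_name="Mark")

# Tidy -> wide again
wide_again = tidy.pivot(index="Name", columns="Assignment", values="Mark")
```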
Introducing Data Science to Students

Visualizations: [explore](https://www.youcubed.org/resource/data-talks), modify, [create](http://bit.ly/2RXTLz8)

* Can start with Callysto resources
* Consider "ask three then me"

[Educator Starter Kit](https://www.callysto.ca/starter-kit)

[Online courses](https://www.callysto.ca/distance-learning)

[Basics of Python and Jupyter](https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fpresentations&branch=master&subPath=IntroductionToJupyterAndPython/callysto-introduction-to-jupyter-and-python-1.ipynb&depth=1)

[Troubleshooting](https://www.callysto.ca/troubleshooting)

Turtles

Another way to introduce students to Python, Jupyter, and data science. Start with Python turtles:

* [Python Turtles student version](https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2FTMTeachingTurtles&branch=master&subPath=TMPythonTurtles/turtles-and-python-intro-student.ipynb&depth=1)
* [Python Turtles instructor version (key)](https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2FTMTeachingTurtles&branch=master&subPath=TMPythonTurtles/turtles-and-python-intro-instructor.ipynb&depth=1)
###Code
from mobilechelonian import Turtle
t = Turtle()
t.forward(50)
t.right(90)
t.penup()
t.forward(30)
###Output
_____no_output_____ |
docs/using-a-different-corpus.ipynb | ###Markdown
Using a Different Corpus

WordSegment makes it easy to use a different corpus for word segmentation.

If you simply want to "teach" the algorithm a single phrase it doesn't know then read [this StackOverflow answer](http://stackoverflow.com/questions/20695825/english-word-segmentation-in-nlp).

Now, let's get a new corpus. For this example, we'll use the text from Jane Austen's *Pride and Prejudice*.
###Code
import requests
response = requests.get('https://www.gutenberg.org/ebooks/1342.txt.utf-8')
text = response.text
print(len(text))
###Output
717573
###Markdown
Great. We've got a new corpus for `wordsegment`. Now let's look at what parts of the API we need to change. There's one function and two dictionaries: `wordsegment.clean`, `wordsegment.bigram_counts` and `wordsegment.unigram_counts`. We'll work on these in reverse.
###Code
import wordsegment
print(type(wordsegment.unigram_counts), type(wordsegment.bigram_counts))
print(list(wordsegment.unigram_counts.items())[:3])
print(list(wordsegment.bigram_counts.items())[:3])
###Output
[('biennials', 37548.0), ('verplank', 48349.0), ('tsukino', 19771.0)]
[('personal effects', 151369.0), ('basic training', 294085.0), ('it absolutely', 130505.0)]
###Markdown
Ok, so `wordsegment.unigram_counts` is just a dictionary mapping unigrams to their counts. Let's write a method to tokenize our text.
###Code
import re
def tokenize(text):
pattern = re.compile('[a-zA-Z]+')
return (match.group(0) for match in pattern.finditer(text))
print(list(tokenize("Wait, what did you say?")))
###Output
['Wait', 'what', 'did', 'you', 'say']
###Markdown
Now we'll build our dictionaries.
###Code
from collections import Counter
wordsegment.unigram_counts = Counter(tokenize(text))
def pairs(iterable):
iterator = iter(iterable)
values = [next(iterator)]
for value in iterator:
values.append(value)
yield ' '.join(values)
del values[0]
wordsegment.bigram_counts = Counter(pairs(tokenize(text)))
###Output
_____no_output_____
###Markdown
That's it. Now, by default, `wordsegment.segment` lowercases all input and removes punctuation. In our corpus we have capitals, so we'll also have to change the `clean` function. Our heaviest hammer is to simply replace it with the identity function. This will do no sanitization of the input to `segment`.
###Code
def identity(value):
return value
wordsegment.clean = identity
wordsegment.segment('wantofawife')
###Output
_____no_output_____
###Markdown
If you find this behaves poorly then you may need to change the `wordsegment.TOTAL` variable to reflect the total of all unigrams. In our case that's simply:
###Code
wordsegment.TOTAL = float(sum(wordsegment.unigram_counts.values()))
###Output
_____no_output_____
###Markdown
Using a Different Corpus

zh_segment makes it easy to use a different corpus for word segmentation.

If you simply want to "teach" the algorithm a single phrase it doesn't know then read [this StackOverflow answer](http://stackoverflow.com/questions/20695825/english-word-segmentation-in-nlp).

Now, let's get a new corpus. For this example, we'll use the text from Jane Austen's *Pride and Prejudice*.
###Code
import requests
response = requests.get('https://www.gutenberg.org/ebooks/1342.txt.utf-8')
text = response.text
print(len(text))
###Output
717573
###Markdown
Great. We've got a new corpus for `zh_segment`. Now let's look at what parts of the API we need to change. There's one function and two dictionaries: `zh_segment.clean`, `zh_segment.bigram_counts` and `zh_segment.unigram_counts`. We'll work on these in reverse.
###Code
import zh_segment
print(type(zh_segment.unigram_counts), type(zh_segment.bigram_counts))
print(list(zh_segment.unigram_counts.items())[:3])
print(list(zh_segment.bigram_counts.items())[:3])
###Output
[('biennials', 37548.0), ('verplank', 48349.0), ('tsukino', 19771.0)]
[('personal effects', 151369.0), ('basic training', 294085.0), ('it absolutely', 130505.0)]
###Markdown
Ok, so `zh_segment.unigram_counts` is just a dictionary mapping unigrams to their counts. Let's write a method to tokenize our text.
###Code
import re
def tokenize(text):
pattern = re.compile('[a-zA-Z]+')
return (match.group(0) for match in pattern.finditer(text))
print(list(tokenize("Wait, what did you say?")))
###Output
['Wait', 'what', 'did', 'you', 'say']
###Markdown
Now we'll build our dictionaries.
###Code
from collections import Counter
zh_segment.unigram_counts = Counter(tokenize(text))
def pairs(iterable):
iterator = iter(iterable)
values = [next(iterator)]
for value in iterator:
values.append(value)
yield ' '.join(values)
del values[0]
zh_segment.bigram_counts = Counter(pairs(tokenize(text)))
###Output
_____no_output_____
###Markdown
That's it. Now, by default, `zh_segment.segment` lowercases all input and removes punctuation. In our corpus we have capitals, so we'll also have to change the `clean` function. Our heaviest hammer is to simply replace it with the identity function. This will do no sanitization of the input to `segment`.
###Code
def identity(value):
return value
zh_segment.clean = identity
zh_segment.segment('wantofawife')
###Output
_____no_output_____
###Markdown
If you find this behaves poorly then you may need to change the `zh_segment.TOTAL` variable to reflect the total of all unigrams. In our case that's simply:
###Code
zh_segment.TOTAL = float(sum(zh_segment.unigram_counts.values()))
###Output
_____no_output_____
###Markdown
zh_segment doesn't require any fancy machine learning training algorithms. Simply update the unigram and bigram count dictionaries and you're ready to go.
###Code
###Output
_____no_output_____ |
fifa19-slide-deck.ipynb | ###Markdown
EA FIFA 19 Players Stats by Faress Eissa

Data

> Detailed attributes for every player registered in the latest edition of the FIFA 19 database. Scraping code at GitHub repo: https://github.com/amanthedorkknight/fifa18-all-player-statistics/tree/master/2019

Introduction

> This is a data visualization project for the dataset on hand, exploring various relationships between players' qualities and attributes in order to gain insights about what types of players have higher Overall, Value and Wage scores.
> The project even goes beyond those main features and explores other secondary ones to understand what affects and defines them.
###Code
# import all packages and set plots to be embedded inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# suppress warnings from final output
import warnings
warnings.simplefilter("ignore")
# Load dataset
data= pd.read_csv('data.csv')
data
## Convert the Height column into a single value in cm
Height= data.Height.str.split("'", expand= True)
foot= Height[0].astype('float')
inch= Height[1].astype('float') / 12
data['Height'] = round((foot + inch) * 30.48)
## Convert the Weight column into a single value in kg
data['Weight']= (data.Weight.str.split('l', expand= True)[0]).astype('float')
data['Weight']= round(data.Weight.apply(lambda x: x * 0.453592))
# filling the missing values for the continuous variables for proper data visualization
numeric= ['ShortPassing','Volleys','Dribbling','Curve','FKAccuracy',
'LongPassing','BallControl','HeadingAccuracy','Finishing'
,'Crossing','Weight','Height','Skill Moves','Weak Foot',
'Acceleration','SprintSpeed' ,'Agility','Reactions',
'Balance','ShotPower','Jumping','Stamina' ,'Strength',
'LongShots' ,'Aggression','Interceptions' ,'Positioning',
'Vision','Penalties' ,'Composure','Marking' ,
'StandingTackle','SlidingTackle' ,'GKDiving' ,
'GKHandling' ,'GKKicking' ,'GKPositioning' ,'GKReflexes']
for c in numeric:
data[c].fillna(data[c].median(), inplace = True)
# filling other missing values with default entry
data['Body Type'].fillna('Normal', inplace = True)
data['Club'].fillna('No Club', inplace = True)
data['Work Rate'].fillna('Medium/ Medium', inplace = True)
data['Preferred Foot'].fillna('Right', inplace = True)
data['International Reputation'].fillna(1, inplace = True)
# create several columns inclusive to the mean of corresponding attributes
data['Defending'] = round(data[['Marking', 'StandingTackle',
'SlidingTackle','Interceptions','HeadingAccuracy']].mean(axis= 1))
data['Dribble'] = round(data[[ 'Dribbling', 'Acceleration',
'BallControl']].mean(axis= 1))
data['Mental'] = round(data[['Aggression', 'Interceptions', 'Positioning',
'Vision','Composure']].mean(axis=1))
data['Passing'] = round(data[['Crossing', 'ShortPassing',
'LongPassing','Curve']].mean(axis= 1))
data['Mobility'] = round(data[['Acceleration', 'SprintSpeed',
'Agility','Reactions']].mean(axis= 1))
data['Power'] = round(data[['Balance', 'Jumping', 'Stamina',
'Strength']].mean(axis= 1))
data['Shooting'] = round(data[['Finishing', 'Volleys', 'FKAccuracy',
'ShotPower','LongShots', 'Penalties','Curve','HeadingAccuracy']].mean(axis= 1))
data['GK']= round(data[['GKDiving', 'GKHandling', 'GKKicking',
'GKPositioning','GKReflexes']].mean(axis= 1))
# create a new df with selected columns
df= data[['ID', 'Name', 'Age','Nationality','Overall','Potential',
'Club','Value','Wage','Special','Defending','Dribble',
'Mental','Passing','Mobility','Power','Shooting','GK',
'Preferred Foot','International Reputation','Weak Foot',
'Skill Moves','Work Rate','Position' ,'Jersey Number',
'Joined','Contract Valid Until','Height','Weight' ]].copy()
# Cleaning some of the values so that we can interpret them
def value_to_int(df_value):
try:
value = float(df_value[1:-1])
suffix = df_value[-1:]
if suffix == 'M':
value = value * 1000000
elif suffix == 'K':
value = value * 1000
except ValueError:
value = 0
return value
df['Value'] = df['Value'].apply(value_to_int)
df['Wage'] = df['Wage'].apply(value_to_int)
###Output
_____no_output_____
###Markdown
Structure of the Dataset

> This dataset consists of 18207 entries, one per player, and 89 columns including players' names, nationalities, clubs and other attributes.

Investigation Overview

> The main features of interest are Overall, Value and Wage.
> Features like Club, Nationality, Age and Position are the most helpful for identifying which players have higher Overall, Value and Wage scores.
> All features, either main or secondary, have some very interesting relationships between them.
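A quick sanity check of those numbers against the raw file (the `data` dataframe above gains extra engineered columns later in the notebook, so we re-read the CSV):

```python
import pandas as pd
print(pd.read_csv('data.csv').shape)  # expected to print (18207, 89) per the description above
```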
###Code
# set base color to be used and set style
base_clr= sns.color_palette('deep')[0]
sns.set_style('whitegrid')
## Combine Position Values into 4 main categories
df.Position.replace(['ST','RF','LW','LF','LS','RW',
'RS', 'CF' ], 'FR', inplace= True)
df.Position.replace(['RCB','LCB','LB','CB','RB','RWB'
, 'LWB' ], 'DF', inplace= True)
df.Position.replace(['LCM', 'LDM','CAM','CDM','RM','LAM','RDM', 'LM','CM',
'RCM', 'RAM'], 'MD', inplace= True)
df['Position']= df['Position'].astype('category')
# Change Value and Wage min value for the sake of using log_trans
df.Value.replace(0,1,inplace=True)
df.Wage.replace(0,1,inplace=True)
# a function to transfer a value to its log
def log_trans(x, inverse = False):
""" quick function for computing log and power operations """
if not inverse:
return np.log10(x)
else:
return np.power(10, x)
df['log_Value'] = df['Value'].apply(log_trans)
df['log_Wage'] = df['Wage'].apply(log_trans)
## Use the log Value and Wage columns for the histograms, as the original values are highly skewed with a very long tail
plt.figure(figsize=[14.70, 8.27])
plt.suptitle('Distribution of Value and Wage among all players')
plt.figtext(0.48, 0.06,'Amounts in Euro')
plt.subplot(1, 2, 1)
bins= np.arange(2.5, df.log_Value.max() + 0.05, 0.05)
ticks= log_trans(np.array([2e4,6e4, 2e5,6e5,2e6,6e6,2e7, 6e7,2e8]))
labels= ['20k','60k','200k','600k','2M','6M','20M','60M','200M']
plt.hist(data= df, x= 'log_Value',bins= bins);
plt.xlim([3.5,9])
plt.xticks(ticks, labels);
plt.title('Value distribution')
plt.subplot(1, 2, 2)
ticks= log_trans(np.array([1000, 3000, 10000, 30000, 100000, 300000]))
labels= ['1k', '3k', '10k', '30k', '100k', '300k']
bins= np.arange(2.5, df.log_Wage.max() + 0.03, 0.03)
plt.hist(data= df, x= 'log_Wage', bins= bins);
plt.xticks(ticks, labels);
plt.title('Wage distribution');
###Output
_____no_output_____
###Markdown
The above two graphs show the distribution of players' Values and Wages.
> Distribution of the original values yields highly skewed, long-tailed graphs with most values on the low end, so I use the log values for the graphs.
> Distribution of log Values is normal with most values between 200k and 2M, with a drop and a jump right before the 2M mark.
> Distribution of log Wages is right skewed with spikes on the left side of the graph and a decreasing tail on the right.
###Code
## list top 10 clubs according to Sum of Value of team palyers
top_clubs= df.groupby(['Club']).sum()['Value'].sort_values(ascending= False).head(10).index
clbs = df[df.Club.isin(top_clubs)]
fig, axes = plt.subplots(2, 2,figsize=[14.70, 8.27])
fig.suptitle('Top Clubs VS different variables', fontsize=16,y= 1.05 )
axes = axes.flatten()
plt.sca(axes[0])
sns.boxplot(clbs.Overall, clbs.Club, color= base_clr
, order= clbs.groupby(['Club']).median()['Overall'].sort_values(ascending= False).index);
plt.xlim(55,100);
plt.sca(axes[2])
sns.boxplot(clbs.Potential, clbs.Club, color= sns.color_palette()[7],
order= clbs.groupby(['Club']).median()['Potential'].sort_values(ascending= False).index);
plt.xlim(55,100);
plt.sca(axes[1])
sns.boxplot(clbs.Age, clbs.Club, color= base_clr,order= clbs.groupby(['Club']).median()['Age'].sort_values().index)
;
plt.sca(axes[3])
sns.boxplot(clbs.Wage, clbs.Club, color= base_clr,order= clbs.groupby(['Club']).median()['Wage'].sort_values(ascending= False).index)
ticks=[5e4, 15e4, 25e4, 35e4, 45e4, 55e4]
labels= ['50k', '150k', '250k', '350k', '450k', '550k']
plt.xticks(ticks, labels)
plt.tight_layout()
;
###Output
_____no_output_____
###Markdown
The above graphs show the relationships of Potential, Overall, Age and Wage versus the top clubs. The top 10 clubs are defined by the summed Value of their players.> We observe that, when comparing Potential to Overall, all teams seem to improve, with players' scores spread over a smaller range.> Juventus is an interesting case: the team's Overall, Potential and Wage values are distributed over the smallest ranges, which indicates that the club is very specific in choosing its players and tries to maintain equal wages, but it has the highest age range, which means the current team's Overall is likely to decline faster over time.> We observe some interesting changes in team order, as Real Madrid jumps to top place according to Potential, compared to seventh place according to Overall.> As a general rule, taking the third graph into account, most of the younger teams move up in order when transitioning from Overall to Potential.> In the fourth graph, 50% of top clubs' players earn 50K or more, with most players sitting between the 50K and 150K marks.> Half the teams have an outlier on the higher side, that is, the player considered the club's star.
###Code
# Create a dataframe with only top 10 countries based on value counts
top_countries= df.groupby(['Nationality']).count().sort_values(by=['ID'], ascending= False).head(10).index
cntrs = df[df.Nationality.isin(top_countries)].copy()
stats= ['Defending','Dribble','Mental','Passing','Mobility',
'Power','Shooting','GK']
# create a dataframe including only Position and various players' attributes for easier plotting
df_stats= df.groupby(['Position']).mean()[stats]
df_stats.plot(kind= 'bar', figsize=[14.70, 8.27]);
plt.legend(bbox_to_anchor=(1, 1));
plt.title('Different attributes distribution among Positions');
###Output
_____no_output_____
###Markdown
The above graph shows the distribution of various player stats by Position.> One observation is that two of the most important attributes for a player in any position are Power and Mobility.> Another observation is that midfield players tend to be the most well-rounded of all positions, with each other position standing out in one specific attribute.
###Code
g= sns.FacetGrid(data= clbs, hue= 'Position', height = 8.27, aspect = 14.70/8.27);
g.map(plt.scatter, 'Overall', 'Wage', alpha= 0.7);
ticks=[5e4, 15e4, 25e4, 35e4, 45e4, 55e4]
labels= ['50k', '150k', '250k', '350k', '450k', '550k']
plt.yticks(ticks, labels)
g.add_legend();
plt.title('Relationship between Overall, Wage and Position');
###Output
_____no_output_____
###Markdown
The above graph shows the relationship between Overall and Wage, with Position added as a third element.> We notice that Overall and Wage have a positive exponential relationship, as higher-rated players tend to get paid far more than lower-rated ones.> Another observation is that all players who get paid more than 250K, except one, are either midfielders or forwards.
###Code
plt.figure(figsize=[14.70, 8.27])
cat_means = cntrs.groupby(['Nationality', 'Position']).mean()['Overall']
cat_means = cat_means.reset_index(name = 'Overall_avg')
cat_means = cat_means.pivot(index = 'Nationality', columns = 'Position',
values = 'Overall_avg')
g= sns.heatmap(cat_means, annot = True, fmt = '.2f',cmap= 'YlGnBu'
,linewidths=.5, cbar_kws = {'label' : 'mean(Overall)'});
g.set(ylim= (0,10));
plt.title('Mean Overall distribution among Positions and Countries');
###Output
_____no_output_____ |
modules/module_14_4/TF-Linear-Regression.ipynb | ###Markdown
Basic regression: Predict fuel efficiency This notebook uses the classic Auto MPG Dataset and builds a model to predict the fuel efficiency of late-1970s and early-1980s automobiles. To do this, provide the model with a description of many automobiles from that time period. This description includes attributes like cylinders, displacement, horsepower, and weight. This example uses the tf.keras API.
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Make numpy printouts easier to read.
np.set_printoptions(precision=3, suppress=True)
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
print(tf.__version__)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(url, names=column_names,
na_values='?', comment='\t',
sep=' ', skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
dataset.isna().sum()
dataset = dataset.dropna()
dataset['Origin'] = dataset['Origin'].map({1: 'USA', 2: 'Europe', 3: 'Japan'})
dataset = pd.get_dummies(dataset, columns=['Origin'], prefix='', prefix_sep='')
dataset.tail()
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
sns.pairplot(train_dataset[['MPG', 'Cylinders', 'Displacement', 'Weight']], diag_kind='kde')
train_dataset.describe().transpose()
train_features = train_dataset.copy()
test_features = test_dataset.copy()
train_labels = train_features.pop('MPG')
test_labels = test_features.pop('MPG')
train_dataset.describe().transpose()[['mean', 'std']]
normalizer = preprocessing.Normalization(axis=-1)
normalizer.adapt(np.array(train_features))
print(normalizer.mean.numpy())
first = np.array(train_features[:1])
with np.printoptions(precision=2, suppress=True):
print('First example:', first)
print()
print('Normalized:', normalizer(first).numpy())
###Output
First example: [[ 4. 90. 75. 2125. 14.5 74. 0. 0. 1. ]]
Normalized: [[-0.87 -1.01 -0.79 -1.03 -0.38 -0.52 -0.47 -0.5 0.78]]
###Markdown
Linear Regression One Variable Create Normalization layer
###Code
horsepower = np.array(train_features['Horsepower'])
horsepower_normalizer = preprocessing.Normalization(input_shape=[1,], axis=None)
horsepower_normalizer.adapt(horsepower)
horsepower_model = tf.keras.Sequential([
horsepower_normalizer,
layers.Dense(units=1)
])
horsepower_model.summary()
horsepower_model.predict(horsepower[:10])
horsepower_model.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.1),
loss='mean_absolute_error'
)
%%time
history = horsepower_model.fit(
train_features['Horsepower'], train_labels,
epochs=100,
# suppress logging
verbose=0,
# Calculate validation results on 20% of the training data
validation_split = 0.2)
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_loss(history):
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.ylim([0, 10])
plt.xlabel('Epoch')
plt.ylabel('Error [MPG]')
plt.legend()
plt.grid(True)
plot_loss(history)
test_results = {}
test_results['horsepower_model'] = horsepower_model.evaluate(
test_features['Horsepower'],
test_labels, verbose=0)
x = tf.linspace(0.0, 250, 251)
y = horsepower_model.predict(x)
def plot_horsepower(x, y):
plt.scatter(train_features['Horsepower'], train_labels, label='Data')
plt.plot(x, y, color='k', label='Predictions')
plt.xlabel('Horsepower')
plt.ylabel('MPG')
plt.legend()
plot_horsepower(x,y)
###Output
_____no_output_____
###Markdown
Multiple Inputs
###Code
linear_model = tf.keras.Sequential([
normalizer,
layers.Dense(units=1)
])
linear_model.predict(train_features[:10])
# m / slope
linear_model.layers[1].kernel
linear_model.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.1),
loss='mean_absolute_error')
%%time
history = linear_model.fit(
train_features, train_labels,
epochs=100,
# suppress logging
verbose=0,
# Calculate validation results on 20% of the training data
validation_split = 0.2)
plot_loss(history)
test_results['linear_model'] = linear_model.evaluate(
test_features, test_labels, verbose=0)
###Output
_____no_output_____
###Markdown
DNN regression
###Code
def build_and_compile_model(norm):
model = keras.Sequential([
norm,
layers.Dense(64, activation='relu'),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
model.compile(loss='mean_absolute_error',
optimizer=tf.keras.optimizers.Adam(0.001))
return model
dnn_horsepower_model = build_and_compile_model(horsepower_normalizer)
dnn_horsepower_model.summary()
%%time
history = dnn_horsepower_model.fit(
train_features['Horsepower'], train_labels,
validation_split=0.2,
verbose=0, epochs=100)
plot_loss(history)
x = tf.linspace(0.0, 250, 251)
y = dnn_horsepower_model.predict(x)
plot_horsepower(x, y)
test_results['dnn_horsepower_model'] = dnn_horsepower_model.evaluate(
test_features['Horsepower'], test_labels,
verbose=0)
###Output
_____no_output_____
###Markdown
Full Model
###Code
dnn_model = build_and_compile_model(normalizer)
dnn_model.summary()
%%time
history = dnn_model.fit(
train_features, train_labels,
validation_split=0.2,
verbose=0, epochs=100)
plot_loss(history)
test_results['dnn_model'] = dnn_model.evaluate(test_features, test_labels, verbose=0)
###Output
_____no_output_____
###Markdown
Performance
###Code
pd.DataFrame(test_results, index=['Mean absolute error [MPG]']).T
###Output
_____no_output_____
###Markdown
Make predictions
###Code
test_predictions = dnn_model.predict(test_features).flatten()
a = plt.axes(aspect='equal')
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
lims = [0, 50]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
test_labels
error = test_predictions - test_labels
plt.hist(error, bins=25)
plt.xlabel('Prediction Error [MPG]')
_ = plt.ylabel('Count')
dnn_model.save('dnn_model')
reloaded = tf.keras.models.load_model('dnn_model')
test_results['reloaded'] = reloaded.evaluate(
test_features, test_labels, verbose=0)
pd.DataFrame(test_results, index=['Mean absolute error [MPG]']).T
###Output
_____no_output_____ |
pandemic/pandemic.ipynb | ###Markdown
Single-core processingThe following specifies the basic properties of the simulation. The most important properties of the disease are:- The infection parameters `inf_spec` = [$r_{\rm inf}, t_{\rm avg}, t_{\rm std}$]- The recovery parameters `recov_spec` = [$t_{\rm avg}, t_{\rm std}$]- The death parameters `dead_spec` = [$t_{\rm avg}, t_{\rm std}$]Note that $r_{\rm inf}$ is the radius of the infectious zone. Only when a subject enters this zone does the program sample whether the subject will be infected. The unit is meters. The other parameters, $t_{\rm avg}$ and $t_{\rm std}$, are the average time and standard deviation for reaching that condition. The unit is hours.
###Code
# Specified basic parameters
# number of ill in the beginning
n_ill = 1
# number of health in the beginning
n_health = 999
# specified the parameters for infection, recovery and death
inf_spec = [3,0.25,0.5]
recov_spec = [35*24,10*24]
dead_spec = [40*24,10*24]
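# For intuition: with recov_spec = [35*24, 10*24], an individual's recovery time is
# presumably drawn from a normal distribution, e.g. something like
# np.random.normal(35*24, 10*24) hours. This is only an illustration of the
# [t_avg, t_std] convention, not the actual sampling code inside PandemicSimulation.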
# Size of the box, meters
box_size = [[-600,600],[-600,600]]
# Initial positions of the subjects, meters
prange = [[-600,600],[-600,600]]
# Initial velocity of the subjects, meters/hour
vrange = [5,30]
# the protective effect of wearing a mask; it should be positive and larger than 1
mask = None
# size of the time step; smaller values give better time resolution but are more time consuming
dt = 0.1
# How many days to be simulated. Just change the days instead of steps for convenience
days = 30
steps = int(days*24*(1/dt))
# Should it save the data after running the simulation
save_data = True
# Should the program adjust the dt size to recover enough time resolution when the user input is too coarse-grained?
self_adaptive = False
# Output summary plot dpi
dpi = 150
# Name of the disease. U stands unknown
disease_name = 'C-Flu-2019'
###Output
_____no_output_____
###Markdown
In the above, I set `save_data=True`, so when the simulation is over it will automatically save the simulated data into a folder named after `disease_name` in the same location as this notebook. After running the simulation, it will also display the corresponding time-evolving statistics.
###Code
# Running the simulation according to the above specified properties
_ = PandemicSimulation(n_ill = n_ill,n_health = n_health,steps = steps, dt = dt,
inf_spec = inf_spec,recov_spec = recov_spec,dead_spec = dead_spec,
mask_protect = mask, self_adaptive = self_adaptive, save_data = save_data,
disease_name = disease_name, box_size = box_size,
prange = prange, vrange = vrange,dpi=dpi)
# Load simulated data if you want to do something with it
# If not, you can ignore this function
summary,fullout = loadsim(disease_name)
###Output
_____no_output_____
###Markdown
If you want to draw the simulated data, you can use the following function. After running, it will also generate a log file `imginfo.txt`. Please do not delete it if you want to go on to make a video out of these figures.
###Code
# Plot the simulated data in each step (dt) into a PNG figure
# to avoid plotting too many figures, one can ask the program to skip some of the steps by using skip
# How many steps should be skipped between two plots. If it is None then no step will be skipped
skip = 100
drawsim(disease_name,skip=skip,dpi=dpi)
###Output
Drawing process has completed in 25.3 seconds.
Log file imginfo.txt saved!
###Markdown
When the `disease_name` is given, it will search the folder that stores the images plotted by `drawsim` via the log file `imginfo.txt`.
###Code
# Make all the output figures into a single video, mp4 format
# This function requires python OpenCV, cv2
# If there are more than a thousand figures, it could take minutes to generate the video
# The fps of the video
fps = 10
mkvideo(disease_name,fps=fps)
###Output
_____no_output_____
###Markdown
Multiprocessing support for figure drawingThe figure drawing function `drawsim` also has a multiprocessing counterpart called `drawsim_mp`. It's still experimental and is not imported automatically by default. You can use the following to import it. It is easily seen that turning on the multi-core support speeds up drawing, roughly linearly in the number of cores available.
###Code
# load multiprocessing drawsim_mp
from drawsim_mp import drawsim_mp
# Experimental multiprocessing draw, it can significantly accelerate the drawing speed
# If you encounter problems, please turn off this function and let me know the issue
cores = 4
skip = 100
drawsim_mp('C-Flu-2019',skip=skip,dpi=dpi,cores=cores)
###Output
Drawing process has completed in 7.97 seconds..
Log file imginfo.txt saved!
|
notebooks/04.4 Model Evaluation and Scoring Metrics.ipynb | ###Markdown
Model evaluation and imbalanced cost or data============================================= While we already went into some detail on how to evaluate a model and how to pick the best model, so far we assumed we were given a measure of the quality of the model. What measure one should use is not always obvious, though. The default scores in scikit-learn are ``accuracy`` for classification, which is the fraction of correctly classified samples, and ``r2`` for regression, which is the coefficient of determination. These are reasonable default choices, but not always right. Let's look at classification in more detail, going back to the application of classifying handwritten digits. Let's train a classifier and look at the different ways we can evaluate it. Scikit-learn has many helpful methods in the ``sklearn.metrics`` module.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(precision=2)
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from sklearn.svm import LinearSVC
digits = load_digits()
X, y = digits.data, digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
classifier = LinearSVC().fit(X_train, y_train)
y_test_pred = classifier.predict(X_test)
print("Accuracy: %f" % classifier.score(X_test, y_test))
###Output
_____no_output_____
###Markdown
Here, we predicted 94.4% of samples correctly. For multi-class problems, it is often interesting to know which of the classes are hard to predict, which are easy, and which classes get confused. One way to get more information about misclassifications is the ``confusion_matrix``, which shows, for each true class, how frequent each predicted outcome is.
###Code
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_test_pred)
###Output
_____no_output_____
###Markdown
A plot is sometimes more readable:
###Code
plt.matshow(confusion_matrix(y_test, y_test_pred))
plt.colorbar()
plt.xlabel("Predicted label")
plt.ylabel("True label")
###Output
_____no_output_____
###Markdown
We can see that most entries are on the diagonal, which means that we predicted nearly all samples correctly. The off-diagonal entries show us that many eights were classified as ones, and that nines are likely to be confused with many other classes. Another useful function is the ``classification_report`` which provides precision, recall, fscore and support for all classes.Precision is how many of the predictions for a class are actually that class. With TP, FP, TN, FN standing for "true positive", "false positive", "true negative" and "false negative" repectively: Precision = TP / (TP + FP) Recall is how many of the true positives were recovered: Recall = TP / (TP + FN) f1-score is the geometric average of precision and recall.
###Code
from sklearn.metrics import classification_report
print(classification_report(y_test, y_test_pred))
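# As a quick cross-check of the formulas above, precision, recall and f1 for a
# single class (here digit 3) can be computed by hand from the confusion matrix.
cm = confusion_matrix(y_test, y_test_pred)
tp = cm[3, 3]
fp = cm[:, 3].sum() - tp  # predicted as 3 but actually another class
fn = cm[3, :].sum() - tp  # actually 3 but predicted as another class
precision_3 = tp / float(tp + fp)
recall_3 = tp / float(tp + fn)
f1_3 = 2 * precision_3 * recall_3 / (precision_3 + recall_3)  # harmonic mean
print("class 3: precision=%.2f recall=%.2f f1=%.2f" % (precision_3, recall_3, f1_3))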
###Output
_____no_output_____
###Markdown
These metrics are helpful in two particular cases that come up often in practice:1. Imbalanced classes, that is one class might be much more frequent than the other.2. Asymmetric costs, that is one kind of error is much more "costly" than the other. Let's have a look at 1. first. Say we have a class imbalance of 1:9, which is rather mild (think about ad-click-prediction where maybe 0.001% of ads might be clicked). As a toy example, let's say we want to classify the digits three against all other digits:
###Code
X, y = digits.data, digits.target == 3
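# Sanity check of the claimed (roughly) 1:9 imbalance: about 10% of the digits are threes.
print("fraction of positive samples: %.3f" % y.mean())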
###Output
_____no_output_____
###Markdown
Now we run cross-validation on a classifier to see how well it does:
###Code
from sklearn.cross_validation import cross_val_score
from sklearn.svm import SVC
cross_val_score(SVC(), X, y)
###Output
_____no_output_____
###Markdown
Our classifier is 90% accurate. Is that good? Or bad? Keep in mind that 90% of the data is "not three". So let's see how well a dummy classifier does that always predicts the most frequent class:
###Code
from sklearn.dummy import DummyClassifier
cross_val_score(DummyClassifier("most_frequent"), X, y)
###Output
_____no_output_____
###Markdown
Also 90% (as expected)! So one might think that means our classifier is not very good; it doesn't do better than a simple strategy that doesn't even look at the data. That would be judging too quickly, though. Accuracy is simply not a good way to evaluate classifiers for imbalanced datasets!ROC Curves=======A much better measure is the so-called ROC (receiver operating characteristic) curve. A ROC curve works with the uncertainty outputs of a classifier, say the ``decision_function`` of the ``SVC`` we trained above. Instead of making a cut-off at zero and looking at classification outcomes, it looks at every possible cut-off and records how many true positive predictions and how many false positive predictions there are. The following plot compares the ROC curves of three parameter settings of our classifier on the "three vs rest" task.
###Code
from sklearn.metrics import roc_curve, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
for gamma in [.01, .05, 1]:
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate (recall)")
svm = SVC(gamma=gamma).fit(X_train, y_train)
decision_function = svm.decision_function(X_test)
fpr, tpr, _ = roc_curve(y_test, decision_function)
acc = svm.score(X_test, y_test)
auc = roc_auc_score(y_test, svm.decision_function(X_test))
plt.plot(fpr, tpr, label="acc:%.2f auc:%.2f" % (acc, auc), linewidth=3)
plt.legend(loc="best")
###Output
_____no_output_____
###Markdown
With a very high decision threshold, there will be few false positives, but also few true positives, while with a very low threshold, both the true positive rate and the false positive rate will be high. So in general, the curve will go from the lower left to the upper right. A diagonal line reflects chance performance, while the goal is to be as close to the top left corner as possible. This means giving a higher decision_function value to all positive samples than to any negative sample. In this sense, this curve only considers the ranking of the positive and negative samples, not the actual values. As you can see from the curves and the accuracy values in the legend, even though all classifiers have the same accuracy, 89%, which is even lower than the dummy classifier, one of them has a perfect roc curve, while one of them performs at chance level. For doing grid-search and cross-validation, we usually want to condense our model evaluation into a single number. A good way to do this with the roc curve is to use the area under the curve (AUC). We can simply use this in ``cross_val_score`` by specifying ``scoring="roc_auc"``:
###Code
from sklearn.cross_validation import cross_val_score
cross_val_score(SVC(), X, y, scoring="roc_auc")
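# AUC has a useful probabilistic reading: it is the probability that a randomly
# drawn positive sample receives a higher score than a randomly drawn negative
# one. A minimal sketch verifying this against roc_auc_score on the held-out
# split from above (ties, if any, are ignored here; svm is the last model fit
# in the loop above):
scores = svm.decision_function(X_test)
pos, neg = scores[y_test], scores[~y_test]
print(np.mean(pos[:, np.newaxis] > neg[np.newaxis, :]),
      roc_auc_score(y_test, scores))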
###Output
_____no_output_____
###Markdown
Built-In and custom scoring functions======================================= There are many more scoring methods available, which are useful for different kinds of tasks. You can find them in the "SCORERS" dictionary. The only documentation explains all of them.
###Code
from sklearn.metrics.scorer import SCORERS
print(SCORERS.keys())
###Output
_____no_output_____
###Markdown
It is also possible to define your own scoring metric. Instead of a string, you can provide a callable as the ``scoring`` parameter, that is, an object with a ``__call__`` method or a function. It needs to take a model, test-set features ``X_test`` and test-set labels ``y_test``, and return a float. Higher floats are taken to mean better models. Let's reimplement the standard accuracy score:
###Code
def my_accuracy_scoring(est, X, y):
return np.mean(est.predict(X) == y)
cross_val_score(SVC(), X, y, scoring=my_accuracy_scoring)
###Output
_____no_output_____
###Markdown
The interesting thing about this interface is that we can access any attributes of the estimator we trained. Let's say we have trained a linear model, and we want to penalize having non-zero coefficients in our model selection:
###Code
def my_super_scoring(est, X, y):
return np.mean(est.predict(X) == y) - np.mean(est.coef_ != 0)
###Output
_____no_output_____
###Markdown
We can evaluate if this worked as expected, by grid-searching over l1 and l2 penalties in a linear SVM. An l1 penalty is expected to produce exactly zero coefficients:
###Code
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
y = digits.target
grid = GridSearchCV(LinearSVC(C=.01, dual=False),
param_grid={'penalty' : ['l1', 'l2']},
scoring=my_super_scoring)
grid.fit(X, y)
print(grid.best_params_)
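# The l1 penalty should indeed zero out coefficients; we can check the sparsity
# of the refit best estimator:
print("fraction of zero coefficients: %.2f" % np.mean(grid.best_estimator_.coef_ == 0))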
###Output
_____no_output_____ |
P4_Stock/.ipynb_checkpoints/stockdemo-checkpoint.ipynb | ###Markdown
Load newest Google stock data from Yahoo Finance
###Code
# load data
import datetime as dt
import numpy as np
import pandas_datareader
start = dt.datetime(1995,1,1)
end = dt.date.today()
data = pandas_datareader.data.DataReader('GOOG','yahoo',start,end)
data.head()
###Output
_____no_output_____
###Markdown
Normalise and Preprocess the data like a boss
###Code
#normalise data
data_n = stock_data_preprocessing.normalise_stock_data(data)
data_n.head()
###Output
_____no_output_____
###Markdown
1,2,3 Plot Line!
###Code
stock_data_preprocessing.stock_plot((data_n,))
###Output
_____no_output_____
###Markdown
Pick up the Data
###Code
# training data
prediction_time = 1 #day
testdatasize = 450
unroll_length = 50
testdatacut = testdatasize + unroll_length + 1
x_train = data_n[0:-prediction_time-testdatacut].as_matrix()
y_train = data_n[prediction_time:-testdatacut ]['Normalised Close'].as_matrix()
# test data
x_test = data_n[0-testdatacut:-prediction_time].as_matrix()
y_test = data_n[prediction_time-testdatacut: ]['Normalised Close'].as_matrix()
###Output
_____no_output_____
###Markdown
unroll it
###Code
def unroll(data,sequence_length=24):
result = []
for index in range(len(data) - sequence_length):
result.append(data[index: index + sequence_length])
return np.asarray(result)
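# Tiny illustration of the sliding window: unrolling [0, 1, 2, 3, 4] with a
# sequence length of 3 yields the overlapping windows [0, 1, 2] and [1, 2, 3].
print(unroll(np.arange(5), 3))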
x_train = unroll(x_train,unroll_length)
x_test = unroll(x_test,unroll_length)
y_train = y_train[-x_train.shape[0]:]
y_test = y_test[-x_test.shape[0]:]
print("x_train", x_train.shape)
print("y_train", y_train.shape)
print("x_test", x_test.shape)
print("y_test", y_test.shape)
###Output
x_train (2604, 50, 12)
y_train (2604,)
x_test (450, 50, 12)
y_test (450,)
###Markdown
Go! (with Python 3.5, Keras 1.2.2 and Tensorflow 1.0, better on AWS)
###Code
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import lstm, time #helper libraries
#Step 2 Build Model
model = Sequential()
model.add(LSTM(
input_dim=x_train.shape[-1],
output_dim=50,
return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
100,
return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(
output_dim=1))
model.add(Activation('linear'))
start = time.time()
model.compile(loss='mse', optimizer='rmsprop')
print('compilation time : {}'.format(time.time() - start))
#Step 3 Train the model
model.fit(
x_train,
y_train,
batch_size=3028,
nb_epoch=350,
validation_split=0.05)
###Output
Train on 2473 samples, validate on 131 samples
Epoch 1/350
2473/2473 [==============================] - 1s - loss: 0.1511 - val_loss: 1.5176
Epoch 2/350
2473/2473 [==============================] - 0s - loss: 1.5853 - val_loss: 0.0046
Epoch 3/350
2473/2473 [==============================] - 0s - loss: 0.0503 - val_loss: 0.0038
Epoch 4/350
2473/2473 [==============================] - 0s - loss: 0.0471 - val_loss: 4.8843e-04
Epoch 5/350
2473/2473 [==============================] - 0s - loss: 0.0443 - val_loss: 3.7705e-04
Epoch 6/350
2473/2473 [==============================] - 0s - loss: 0.0438 - val_loss: 7.4141e-04
Epoch 7/350
2473/2473 [==============================] - 0s - loss: 0.0422 - val_loss: 5.0372e-04
Epoch 8/350
2473/2473 [==============================] - 0s - loss: 0.0413 - val_loss: 7.1282e-04
Epoch 9/350
2473/2473 [==============================] - 0s - loss: 0.0410 - val_loss: 6.9578e-04
Epoch 10/350
2473/2473 [==============================] - 0s - loss: 0.0399 - val_loss: 7.0684e-04
Epoch 11/350
2473/2473 [==============================] - 0s - loss: 0.0392 - val_loss: 3.5026e-04
Epoch 12/350
2473/2473 [==============================] - 0s - loss: 0.0386 - val_loss: 3.4143e-04
Epoch 13/350
2473/2473 [==============================] - 0s - loss: 0.0362 - val_loss: 3.7493e-04
Epoch 14/350
2473/2473 [==============================] - 0s - loss: 0.0367 - val_loss: 4.1753e-04
Epoch 15/350
2473/2473 [==============================] - 0s - loss: 0.0337 - val_loss: 0.0010
Epoch 16/350
2473/2473 [==============================] - 0s - loss: 0.0334 - val_loss: 8.9436e-04
Epoch 17/350
2473/2473 [==============================] - 0s - loss: 0.0349 - val_loss: 0.0078
Epoch 18/350
2473/2473 [==============================] - 0s - loss: 0.0420 - val_loss: 0.0219
Epoch 19/350
2473/2473 [==============================] - 0s - loss: 0.0601 - val_loss: 0.0589
Epoch 20/350
2473/2473 [==============================] - 0s - loss: 0.0924 - val_loss: 0.0376
Epoch 21/350
2473/2473 [==============================] - 0s - loss: 0.0674 - val_loss: 0.0162
Epoch 22/350
2473/2473 [==============================] - 0s - loss: 0.0515 - val_loss: 0.0115
Epoch 23/350
2473/2473 [==============================] - 0s - loss: 0.0371 - val_loss: 0.0029
Epoch 24/350
2473/2473 [==============================] - 0s - loss: 0.0328 - val_loss: 0.0034
Epoch 25/350
2473/2473 [==============================] - 0s - loss: 0.0307 - val_loss: 0.0032
Epoch 26/350
2473/2473 [==============================] - 0s - loss: 0.0291 - val_loss: 0.0036
Epoch 27/350
2473/2473 [==============================] - 0s - loss: 0.0291 - val_loss: 0.0028
Epoch 28/350
2473/2473 [==============================] - 0s - loss: 0.0279 - val_loss: 0.0023
Epoch 29/350
2473/2473 [==============================] - 0s - loss: 0.0269 - val_loss: 0.0019
Epoch 30/350
2473/2473 [==============================] - 0s - loss: 0.0276 - val_loss: 0.0046
Epoch 31/350
2473/2473 [==============================] - 0s - loss: 0.0290 - val_loss: 0.0053
Epoch 32/350
2473/2473 [==============================] - 0s - loss: 0.0276 - val_loss: 0.0098
Epoch 33/350
2473/2473 [==============================] - 0s - loss: 0.0338 - val_loss: 0.0285
Epoch 34/350
2473/2473 [==============================] - 0s - loss: 0.0440 - val_loss: 0.0294
Epoch 35/350
2473/2473 [==============================] - 0s - loss: 0.0508 - val_loss: 0.0383
Epoch 36/350
2473/2473 [==============================] - 0s - loss: 0.0542 - val_loss: 0.0204
Epoch 37/350
2473/2473 [==============================] - 0s - loss: 0.0480 - val_loss: 0.0184
Epoch 38/350
2473/2473 [==============================] - 0s - loss: 0.0365 - val_loss: 0.0071
Epoch 39/350
2473/2473 [==============================] - 0s - loss: 0.0272 - val_loss: 0.0052
Epoch 40/350
2473/2473 [==============================] - 0s - loss: 0.0248 - val_loss: 0.0029
Epoch 41/350
2473/2473 [==============================] - 0s - loss: 0.0219 - val_loss: 0.0013
Epoch 42/350
2473/2473 [==============================] - 0s - loss: 0.0195 - val_loss: 9.8827e-04
Epoch 43/350
2473/2473 [==============================] - 0s - loss: 0.0193 - val_loss: 9.9133e-04
Epoch 44/350
2473/2473 [==============================] - 0s - loss: 0.0188 - val_loss: 0.0019
Epoch 45/350
2473/2473 [==============================] - 0s - loss: 0.0173 - val_loss: 0.0017
Epoch 46/350
2473/2473 [==============================] - 0s - loss: 0.0187 - val_loss: 0.0052
Epoch 47/350
2473/2473 [==============================] - 0s - loss: 0.0202 - val_loss: 0.0064
Epoch 48/350
2473/2473 [==============================] - 0s - loss: 0.0223 - val_loss: 0.0127
Epoch 49/350
2473/2473 [==============================] - 0s - loss: 0.0277 - val_loss: 0.0237
Epoch 50/350
2473/2473 [==============================] - 0s - loss: 0.0361 - val_loss: 0.0246
Epoch 51/350
2473/2473 [==============================] - 0s - loss: 0.0415 - val_loss: 0.0241
Epoch 52/350
2473/2473 [==============================] - 0s - loss: 0.0361 - val_loss: 0.0114
Epoch 53/350
2473/2473 [==============================] - 0s - loss: 0.0260 - val_loss: 0.0065
Epoch 54/350
2473/2473 [==============================] - 0s - loss: 0.0207 - val_loss: 0.0040
Epoch 55/350
2473/2473 [==============================] - 0s - loss: 0.0180 - val_loss: 0.0030
Epoch 56/350
2473/2473 [==============================] - 0s - loss: 0.0166 - val_loss: 0.0025
Epoch 57/350
2473/2473 [==============================] - 0s - loss: 0.0155 - val_loss: 0.0021
Epoch 58/350
2473/2473 [==============================] - 0s - loss: 0.0155 - val_loss: 0.0031
Epoch 59/350
2473/2473 [==============================] - 0s - loss: 0.0155 - val_loss: 0.0039
Epoch 60/350
2473/2473 [==============================] - 0s - loss: 0.0162 - val_loss: 0.0046
Epoch 61/350
2473/2473 [==============================] - 0s - loss: 0.0172 - val_loss: 0.0083
Epoch 62/350
2473/2473 [==============================] - 0s - loss: 0.0179 - val_loss: 0.0083
Epoch 63/350
2473/2473 [==============================] - 0s - loss: 0.0203 - val_loss: 0.0114
Epoch 64/350
2473/2473 [==============================] - 0s - loss: 0.0204 - val_loss: 0.0095
Epoch 65/350
2473/2473 [==============================] - 0s - loss: 0.0227 - val_loss: 0.0110
Epoch 66/350
2473/2473 [==============================] - 0s - loss: 0.0209 - val_loss: 0.0083
Epoch 67/350
2473/2473 [==============================] - 0s - loss: 0.0198 - val_loss: 0.0083
Epoch 68/350
2473/2473 [==============================] - 0s - loss: 0.0166 - val_loss: 0.0046
Epoch 69/350
2473/2473 [==============================] - 0s - loss: 0.0143 - val_loss: 0.0039
Epoch 70/350
2473/2473 [==============================] - 0s - loss: 0.0133 - val_loss: 0.0021
Epoch 71/350
2473/2473 [==============================] - 0s - loss: 0.0121 - val_loss: 0.0033
Epoch 72/350
2473/2473 [==============================] - 0s - loss: 0.0111 - val_loss: 0.0017
Epoch 73/350
2473/2473 [==============================] - 0s - loss: 0.0112 - val_loss: 0.0032
Epoch 74/350
2473/2473 [==============================] - 0s - loss: 0.0112 - val_loss: 0.0031
Epoch 75/350
2473/2473 [==============================] - 0s - loss: 0.0115 - val_loss: 0.0045
Epoch 76/350
2473/2473 [==============================] - 0s - loss: 0.0126 - val_loss: 0.0053
Epoch 77/350
2473/2473 [==============================] - 0s - loss: 0.0140 - val_loss: 0.0084
Epoch 78/350
2473/2473 [==============================] - 0s - loss: 0.0155 - val_loss: 0.0084
Epoch 79/350
2473/2473 [==============================] - 0s - loss: 0.0167 - val_loss: 0.0113
Epoch 80/350
2473/2473 [==============================] - 0s - loss: 0.0175 - val_loss: 0.0059
Epoch 81/350
2473/2473 [==============================] - 0s - loss: 0.0162 - val_loss: 0.0053
Epoch 82/350
2473/2473 [==============================] - 0s - loss: 0.0132 - val_loss: 0.0044
Epoch 83/350
2473/2473 [==============================] - 0s - loss: 0.0107 - val_loss: 0.0030
Epoch 84/350
2473/2473 [==============================] - 0s - loss: 0.0098 - val_loss: 0.0015
Epoch 85/350
2473/2473 [==============================] - 0s - loss: 0.0085 - val_loss: 0.0013
Epoch 86/350
2473/2473 [==============================] - 0s - loss: 0.0080 - val_loss: 0.0011
Epoch 87/350
2473/2473 [==============================] - 0s - loss: 0.0077 - val_loss: 0.0013
Epoch 88/350
2473/2473 [==============================] - 0s - loss: 0.0073 - val_loss: 0.0021
Epoch 89/350
2473/2473 [==============================] - 0s - loss: 0.0079 - val_loss: 0.0030
Epoch 90/350
2473/2473 [==============================] - 0s - loss: 0.0081 - val_loss: 0.0030
Epoch 91/350
2473/2473 [==============================] - 0s - loss: 0.0091 - val_loss: 0.0056
Epoch 92/350
2473/2473 [==============================] - 0s - loss: 0.0107 - val_loss: 0.0062
Epoch 93/350
2473/2473 [==============================] - 0s - loss: 0.0121 - val_loss: 0.0087
Epoch 94/350
2473/2473 [==============================] - 0s - loss: 0.0130 - val_loss: 0.0071
Epoch 95/350
2473/2473 [==============================] - 0s - loss: 0.0126 - val_loss: 0.0053
Epoch 96/350
2473/2473 [==============================] - 0s - loss: 0.0103 - val_loss: 0.0035
Epoch 97/350
2473/2473 [==============================] - 0s - loss: 0.0090 - val_loss: 0.0032
Epoch 98/350
2473/2473 [==============================] - 0s - loss: 0.0075 - val_loss: 0.0020
Epoch 99/350
2473/2473 [==============================] - 0s - loss: 0.0067 - val_loss: 0.0018
Epoch 100/350
2473/2473 [==============================] - 0s - loss: 0.0065 - val_loss: 0.0015
Epoch 101/350
2473/2473 [==============================] - 0s - loss: 0.0060 - val_loss: 0.0018
Epoch 102/350
2473/2473 [==============================] - 0s - loss: 0.0063 - val_loss: 0.0017
Epoch 103/350
2473/2473 [==============================] - 0s - loss: 0.0061 - val_loss: 0.0021
Epoch 104/350
2473/2473 [==============================] - 0s - loss: 0.0062 - val_loss: 0.0026
Epoch 105/350
2473/2473 [==============================] - 0s - loss: 0.0069 - val_loss: 0.0035
Epoch 106/350
2473/2473 [==============================] - 0s - loss: 0.0081 - val_loss: 0.0050
Epoch 107/350
2473/2473 [==============================] - 0s - loss: 0.0091 - val_loss: 0.0063
Epoch 108/350
2473/2473 [==============================] - 0s - loss: 0.0096 - val_loss: 0.0063
Epoch 109/350
2473/2473 [==============================] - 0s - loss: 0.0094 - val_loss: 0.0038
Epoch 110/350
2473/2473 [==============================] - 0s - loss: 0.0080 - val_loss: 0.0026
Epoch 111/350
2473/2473 [==============================] - 0s - loss: 0.0066 - val_loss: 0.0028
Epoch 112/350
###Markdown
Same Model & ~10x lower Loss !!! :)
###Code
#Step 4 - Plot the predictions!
predictions = lstm.predict_sequences_multiple(model, x_test, 50, 50)
lstm.plot_results_multiple(predictions, y_test, 50)
###Output
_____no_output_____ |
dmc + multi-agent + rlpyt.ipynb | ###Markdown
Load multi-agent soccer environment (from deepmind control locomotion suite)
###Code
import numpy as np
from dm_control.locomotion import soccer as dm_soccer
import matplotlib.pyplot as plt
%matplotlib inline
import time
# Load the 2-vs-2 soccer environment with episodes of 10 seconds:
dm_env = dm_soccer.load(team_size=2, time_limit=10.)
# Retrieves action_specs for all 4 players.
action_specs = dm_env.action_spec()
# Step through the environment for one episode with random actions.
time_step = dm_env.reset()
while not time_step.last():
actions = []
for action_spec in action_specs:
action = np.random.uniform(
action_spec.minimum, action_spec.maximum, size=action_spec.shape)
actions.append(action)
time_step = dm_env.step(actions)
img = dm_env.physics.render()
plt.imshow(img)
plt.show()
time.sleep(1)
# for i in range(len(action_specs)):
# (
# "Player {}: reward = {}, discount = {}, observations = {}.".format(
# i, time_step.reward[i], time_step.discount,
# time_step.observation[i]))
from dm_control import viewer
import functools
viewer.launch(environment_loader=functools.partial(dm_soccer.load, team_size=2))
###Output
_____no_output_____
###Markdown
Create gym wrapper
###Code
import gym
import dm_control2gym as dmc2gym
env = dmc2gym.wrapper.DmControlWrapper('', '', env=dm_env)
###Output
_____no_output_____
###Markdown
Test gym interface
###Code
obs, rew, done, info = env._step([np.array([0,0,0]) for i in range(4)])
print(obs)
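# The wrapper should expose standard gym spaces; a quick inspection (assuming
# dm_control2gym populates these attributes the way a regular gym.Env does):
print(env.action_space)
print(env.observation_space)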
###Output
WARNING: Logging before flag parsing goes to stderr.
W0927 07:05:19.402275 140527511402240 entity.py:478] Cannot set velocity on Entity with no free joint.
[2019-09-27 07:05:19,402] Cannot set velocity on Entity with no free joint.
W0927 07:05:19.405369 140527511402240 entity.py:478] Cannot set velocity on Entity with no free joint.
[2019-09-27 07:05:19,405] Cannot set velocity on Entity with no free joint.
W0927 07:05:19.407356 140527511402240 entity.py:478] Cannot set velocity on Entity with no free joint.
[2019-09-27 07:05:19,407] Cannot set velocity on Entity with no free joint.
W0927 07:05:19.409510 140527511402240 entity.py:478] Cannot set velocity on Entity with no free joint.
[2019-09-27 07:05:19,409] Cannot set velocity on Entity with no free joint.
W0927 07:05:19.411519 140527511402240 entity.py:478] Cannot set velocity on Entity with no free joint.
[2019-09-27 07:05:19,411] Cannot set velocity on Entity with no free joint.
W0927 07:05:19.413496 140527511402240 entity.py:478] Cannot set velocity on Entity with no free joint.
[2019-09-27 07:05:19,413] Cannot set velocity on Entity with no free joint.
W0927 07:05:19.415046 140527511402240 entity.py:478] Cannot set velocity on Entity with no free joint.
[2019-09-27 07:05:19,415] Cannot set velocity on Entity with no free joint.
W0927 07:05:19.416534 140527511402240 entity.py:478] Cannot set velocity on Entity with no free joint.
[2019-09-27 07:05:19,416] Cannot set velocity on Entity with no free joint.
###Markdown
rlpyt Training Example
###Code
import sys
import gym
from rlpyt.utils.launching.affinity import affinity_from_code
from rlpyt.samplers.parallel.cpu.sampler import CpuSampler
from rlpyt.samplers.parallel.cpu.collectors import CpuResetCollector
from rlpyt.envs.gym import make as gym_make
from rlpyt.algos.pg.ppo import PPO
from rlpyt.agents.pg.mujoco import MujocoFfAgent
from rlpyt.runners.minibatch_rl import MinibatchRl
from rlpyt.utils.logging.context import logger_context
from rlpyt.utils.launching.variant import load_variant, update_config
from rlpyt.experiments.configs.mujoco.pg.mujoco_ppo import configs
def build_and_train(slot_affinity_code, log_dir, run_ID, config_key):
affinity = affinity_from_code(slot_affinity_code)
config = configs[config_key]
variant = load_variant(log_dir)
config = update_config(config, variant)
sampler = CpuSampler(
EnvCls=gym_make,
env_kwargs=config["env"],
CollectorCls=CpuResetCollector,
**config["sampler"]
)
algo = PPO(optim_kwargs=config["optim"], **config["algo"])
agent = MujocoFfAgent(model_kwargs=config["model"], **config["agent"])
runner = MinibatchRl(
algo=algo,
agent=agent,
sampler=sampler,
affinity=affinity,
**config["runner"]
)
name = config["env"]["id"]
with logger_context(log_dir, run_ID, name, config):
runner.train()
if __name__ == "__main__":
build_and_train(*sys.argv[1:])
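# Hypothetical launch (all four arguments are placeholders; the affinity code is
# generated by rlpyt's launching utilities and the config key must exist in the
# mujoco PPO `configs` dict imported above):
#   python train_soccer.py <slot_affinity_code> <log_dir> <run_ID> <config_key>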
import gym
print(gym.__version__)
import gym.spaces.dict
###Output
_____no_output_____ |
Iterated Dilated convolution.ipynb | ###Markdown
Make dataset
###Code
class SentenceDataset(Dataset):
def __init__(
self,
sentence_tags_items,
transform,
vocab,
char_vocab,
tag_vocab
):
self.sentence_tags_items = sentence_tags_items
self.transform = transform
self.vocab = vocab
self.char_vocab = char_vocab
self.tag_vocab = tag_vocab
def __getitem__(self, idx):
word_tensor, char_tensor, tag_tensor, seq_len = self.transform(
self.sentence_tags_items[idx],
self.vocab,
self.char_vocab,
self.tag_vocab
)
word_tensor = torch.from_numpy(np.asarray(word_tensor))#.view(-1, 1)
char_tensor = torch.from_numpy(np.asarray(char_tensor))
tag_tensor = torch.from_numpy(np.asarray(tag_tensor))#.view(-1, 1)
seq_len = torch.from_numpy(np.asarray([seq_len]))
return word_tensor, char_tensor, tag_tensor, seq_len
def __len__(self):
return len(self.sentence_tags_items)
sentence_tag_items = [
(
["dog", "cat", "dog", "puppy"],
["animal_class", "animal_class", "animal_class", "offspring"]
),
(
["dog", "cat", "cat", "puppy"],
["animal_class", "animal_class", "animal_class", "offspring"]
),
(
["dog", "puppy", "dog", "puppy"],
["animal_class", "offspring", "animal_class", "offspring"]
),
]
sent_dataset = SentenceDataset(
sentence_tag_items,
transform,
VOCAB,
CHAR_VOCAB,
TAG_VOCAB
)
train_loader = DataLoader(sent_dataset, batch_size=10, shuffle=True, num_workers=1)
word_tensors, char_tensors, tag_tensors, seq_len = next(iter(train_loader))
word_tensors.size(), char_tensors.size(), tag_tensors.size(), seq_len.size()
seq_len.size()
###Output
_____no_output_____
###Markdown
Train model
###Code
conv1d = torch.nn.Conv1d(5, 10, 1, dilation=2)
torch.rand(2,5,4).size()
conv1d(Variable(torch.rand(2,5,4), requires_grad=False)).size()
emb = torch.nn.Embedding(10, 5)
embeddings = emb(Variable(torch.LongTensor([[1,2,4,5],[4,3,2,9]]), requires_grad=False))
embeddings.size()
embeddings.permute(0, 2, 1).size()
conv1d(embeddings.permute(0, 2, 1))
conv1d(embeddings.permute(0, 2, 1)).max(2)[1].size()
embeddings.unsqueeze(1).size()
char_tensors.size()
char_tensors.view(-1, 15).view(3, 10, -1).shape
class CharCNN(torch.nn.Module):
def __init__(self):
super(CharCNN, self).__init__()
self.char_embedding=4
self.char_conv_features=5
self.char_conv_kernel=1
self.char_emb = torch.nn.Embedding(
len(CHAR_VOCAB),
self.char_embedding
)
self.char_conv1d = torch.nn.Conv1d(
self.char_embedding,
self.char_conv_features,
self.char_conv_kernel
)
self.output_size = self.char_conv_features
def forward(self, char_tensors):
batch_size, seqlen, char_seqlen = char_tensors.size()
char_tensors = char_tensors.view(-1, char_seqlen)
char_tensors = self.char_emb(char_tensors)
char_tensors = char_tensors.permute(0, 2, 1)
char_tensors = self.char_conv1d(char_tensors)
char_tensors = char_tensors.max(2)[0] # Get the global max
char_tensors = char_tensors.view(batch_size, seqlen, -1)
return char_tensors
char_tensors.shape
char_model = CharCNN()
char_tensors.size()
char_model(Variable(char_tensors, requires_grad=False)).size()
torch.cat((char_tensors, char_tensors), -1).size()
embeddings.max(0)
###Output
_____no_output_____
###Markdown
Word model
###Code
class WordEmbeddings(torch.nn.Module):
def __init__(
self,
char_model,
):
super(WordEmbeddings, self).__init__()
self.char_model = char_model
self.word_embedding = 10
self.word_emb = torch.nn.Embedding(
len(VOCAB),
self.word_embedding
)
self.output_size = (
self.word_embedding
+ self.char_model.output_size
)
def forward(self, word_tensors, char_tensors):
char_based_embs = self.char_model(char_tensors)
#print(char_based_embs.size(), type(char_based_embs.data))
word_embs = self.word_emb(word_tensors)
#print(word_embs.size(), type(word_embs.data))
word_embs = torch.cat(
[word_embs, char_based_embs],
-1
) # Concat word and char based embeddings
return word_embs
word_model = WordEmbeddings(char_model)
word_tensors.size(), char_tensors.size()
word_model(
Variable(word_tensors, requires_grad=False),
Variable(char_tensors, requires_grad=False)
).size()
###Output
_____no_output_____
###Markdown
ID CNN modelhttps://arxiv.org/pdf/1702.02098.pdf
###Code
class ID_CNN(torch.nn.Module):
"""ID CNN Encoder
Input: (batch, input_dims, seqlen)
Outpus: (batch, input_dims, seqlen)
"""
def __init__(
self,
input_dims,
dilation_block_depth=5,
field_of_view=2,
block_stacks=2
):
super(ID_CNN, self).__init__()
# We want to make the input emb same as output emb
# This allows us to recursively stack the layers.
self.conv_features = input_dims
self.conv_kernel = 3
self.block_stacks = block_stacks
self.word_char_conv1d = torch.nn.Sequential(
*[
torch.nn.Sequential(
torch.nn.Conv1d(
input_dims,
self.conv_features,
kernel_size=self.conv_kernel,
padding=field_of_view**i,
dilation=field_of_view**i
),
torch.nn.ReLU()
)
for i in range(dilation_block_depth)
]
)
def forward(self, seq_scores):
for block_idx in range(self.block_stacks):
seq_scores = self.word_char_conv1d(seq_scores)
return seq_scores
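# Shape sanity check: with kernel size 3, dilation 2**i and matching padding 2**i,
# every dilated conv preserves the sequence length, so ID_CNN maps
# (batch, dims, seqlen) -> (batch, dims, seqlen).
print(ID_CNN(6)(Variable(torch.rand(2, 6, 7), requires_grad=False)).size())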
class IDCNNEncoder(torch.nn.Module):
"""IDCNNEncoder - Encodes word and char based sentence
Input:
word_tensors - (batch, seqlen),
char_tensors - (batch, seqlen, char_seqlen)
"""
def __init__(
self,
word_model,
):
super(IDCNNEncoder, self).__init__()
self.word_model = word_model
self.id_cnn = ID_CNN(self.word_model.output_size)
def forward(self, word_tensors, char_tensors):
word_embs = self.word_model(word_tensors, char_tensors)
word_embs = word_embs.permute(0, 2, 1)
seq_scores = self.id_cnn(word_embs)
return seq_scores
class IDCNNDecoder(torch.nn.Module):
def __init__(
self,
input_dims,
num_classes,
decoder_layers=3
):
super(IDCNNDecoder, self).__init__()
self.input_dims = input_dims
self.num_classes = num_classes
self.decoder_layers = decoder_layers
self.transform_layer = torch.nn.Sequential(
torch.nn.Linear(self.input_dims, self.num_classes),
torch.nn.ReLU()
)
self.create_decoder_layers()
def create_decoder_layers(self):
self.id_cnn = torch.nn.ModuleList(
[
ID_CNN(self.num_classes, block_stacks=1)  # one single-stack ID-CNN block per decoder layer
for i in range(self.decoder_layers)
]
)
def forward(self, seq_scores):
outputs = []
batch, input_dims, seqlen = seq_scores.size()
seq_scores = seq_scores.permute(0, 2, 1).contiguous()
seq_scores = seq_scores.view(batch*seqlen, input_dims)
seq_scores = self.transform_layer(seq_scores)
seq_scores = seq_scores.view(batch, seqlen, self.num_classes)
seq_scores = seq_scores.permute(0, 2, 1)
for id_cnn in self.id_cnn:
seq_scores = id_cnn(seq_scores)
outputs.append(seq_scores)
return outputs
id_cnn = IDCNNEncoder(word_model)
word_tensors.size(), char_tensors.size()
id_cnn(
Variable(word_tensors, requires_grad=False),
Variable(char_tensors, requires_grad=False)
).size()
id_cnn_decoder = IDCNNDecoder(15, len(TAG_VOCAB))
decoder_outputs = id_cnn_decoder(id_cnn(
Variable(word_tensors, requires_grad=False),
Variable(char_tensors, requires_grad=False)
))
[output.size() for output in decoder_outputs]
def get_loss(decoder_outputs, target, loss_fn):
batch, seqlen = target.size()[:2]
#target = target.unsqueeze(2).permute(0,2,1).contiguous().view(-1, 1).squeeze()
target = target.view(-1)
#print(target.size())
loss = None
for output in decoder_outputs:
output = output.permute(0,2,1).contiguous().view(-1, output.size()[1])
#print(output.size())
if loss is None:
loss = loss_fn(output, target)
else:
loss += loss_fn(output, target)
return loss
loss_fn = torch.nn.CrossEntropyLoss(ignore_index=0)
decoder_outputs[0].permute(0,2,1).contiguous().view(-1, decoder_outputs[0].size()[1]).size()
get_loss(decoder_outputs, Variable(tag_tensors, requires_grad=False), loss_fn)
###Output
_____no_output_____
###Markdown
Train model
###Code
def train(encoder, decoder, dataloader, num_epochs, history=None):
if history is None:
history = []
cuda = torch.cuda.is_available()
if cuda:
encoder.cuda()
decoder.cuda()
optimizer = torch.optim.Adam(list(encoder.parameters()) + list(decoder.parameters()))
loss_fn = torch.nn.CrossEntropyLoss(ignore_index=0)
for i in range(num_epochs):
per_epoch_losses = []
for batch in dataloader:
word_tensors = Variable(batch[0], requires_grad=False)
char_tensors = Variable(batch[1], requires_grad=False)
tag_tensors = Variable(batch[2], requires_grad=False)
seq_len = Variable(batch[3], requires_grad=False)
if cuda:
word_tensors = word_tensors.cuda()
char_tensors = char_tensors.cuda()
tag_tensors = tag_tensors.cuda()
optimizer.zero_grad()
encoding = encoder(word_tensors, char_tensors)
outputs = decoder(encoding)
loss = get_loss(outputs, tag_tensors, loss_fn)
loss.backward()
optimizer.step()
per_epoch_losses.append(loss.data[0])
history.append(np.mean(per_epoch_losses))
print('epoch[%d] loss: %.4f' % (i, loss.data[0]))
return history
char_model = CharCNN()
word_model = WordEmbeddings(char_model)
id_cnn = IDCNNEncoder(word_model)
id_cnn_decoder = IDCNNDecoder(15, len(TAG_VOCAB))
history = None
history = train(id_cnn, id_cnn_decoder, train_loader, 10, history=history)
###Output
epoch[0] loss: 4.8157
epoch[1] loss: 4.8139
epoch[2] loss: 4.8121
epoch[3] loss: 4.8102
epoch[4] loss: 4.8084
epoch[5] loss: 4.8066
epoch[6] loss: 4.8048
epoch[7] loss: 4.8030
epoch[8] loss: 4.8011
epoch[9] loss: 4.7993
|
information_scarping.ipynb | ###Markdown
dict
###Code
! pip install urbandict
import urbandict
urbandict.define('truck')
! pip install wordsapy
! pip install GoogleNews
from GoogleNews import GoogleNews
googlenews = GoogleNews(lang='en', region='US')
googlenews.set_lang('en')
googlenews.set_period('7d')
googlenews.get_news('CAR')
googlenews.search('CAR')
result = googlenews.page_at(1)
result
! pip install mwthesaurus
from mwthesaurus import MWClient
client = MWClient(key="ede6968f-710a-48f5-b91d-79acf2960001")
# client.get("python")
client.get("python")
key = "5b7622e5-5fe6-4429-b1c0-53a2edce4d1a"
word = "car"
d_key = "c7cad8a9-82f4-494e-8e76-dfaebc7ce8c6"
ith_url_req = f"https://dictionaryapi.com/api/v3/references/ithesaurus/json/{word}?key={key}"
itdict_url = f"https://dictionaryapi.com/api/v3/references/sd3/json/{word}?key={d_key}"
import requests
from requests.exceptions import HTTPError
try:
response = requests.get(itdict_url)
response.raise_for_status()
# access JSOn content
jsonResponse = response.json()
print("Entire JSON response")
print(jsonResponse)
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
import requests
from requests.exceptions import HTTPError
try:
response = requests.get(ith_url_req)
response.raise_for_status()
# access JSOn content
jsonResponse = response.json()
print("Entire JSON response")
print(jsonResponse)
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
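# A hedged sketch of pulling fields out of the thesaurus response above.
# Merriam-Webster entries typically expose a 'meta' block with synonym lists
# under 'syns'; treat these exact keys as an assumption and guard accordingly.
if isinstance(jsonResponse, list):
    for entry in jsonResponse:
        if isinstance(entry, dict) and 'meta' in entry:
            print(entry['meta'].get('id'), entry['meta'].get('syns', [])[:1])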
! pip install google-api-python-client
! pip install bs4
from googleapiclient.discovery import build #Import the library
api_key = "AIzaSyD8oV9Vs8yjNPLAjWPe6emSZf1E865K7uY"
cse_id = "e714535f954174f5d"
def google_query(query, api_key, cse_id, **kwargs):
query_service = build("customsearch",
"v1",
developerKey=api_key
)
query_results = query_service.cse().list(q=query, # Query
cx=cse_id, # CSE ID
**kwargs
).execute()
return query_results['items']
my_results_list = []
my_results = google_query("what is a car?",
api_key,
cse_id,
num = 10
)
for result in my_results:
my_results_list.append(result['link'])
print(result['link'])
from googleapiclient.discovery import build #Import the library
api_key = "AIzaSyD8oV9Vs8yjNPLAjWPe6emSZf1E865K7uY"
cse_id = "e714535f954174f5d"
def google_query(query, api_key, cse_id, **kwargs):
query_service = build("customsearch",
"v1",
developerKey=api_key
)
query_results = query_service.cse().list(q=query, # Query
cx=cse_id, # CSE ID
**kwargs
).execute()
return query_results['items']
my_results_list = []
my_results = google_query("car",
api_key,
cse_id,
num = 10
)
for result in my_results:
my_results_list.append(result['link'])
print(result['link'])
import requests
import json
# Other
starts = list(x for x in range(1,100,10))
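# Note: the Custom Search JSON API serves at most 10 results per request and
# roughly 100 results per query overall, with a 1-indexed 'start' parameter;
# hence the range(1, 100, 10) pagination.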
res_dict_lsit = []
for start in starts :
API_KEY = "AIzaSyD8oV9Vs8yjNPLAjWPe6emSZf1E865K7uY"
SEARCH_ENGINE_ID = "e714535f954174f5d"
query = "What is car?"
url = f"https://www.googleapis.com/customsearch/v1?key={API_KEY}&cx={SEARCH_ENGINE_ID}&q={query}"
params = {'num': 10, 'start': start}
res = requests.get(url, params=params).json()
res_dict_lsit.append(res)
# print(json.dumps(res, indent=4, sort_keys=True))
# break
for res in res_dict_lsit:
try:
items = res['items']
for item in items:
title = item['title']
print(title)
print(item['link'])
except KeyError:
print(json.dumps(res, indent=4, sort_keys=True))
# Britannica
starts = list(x for x in range(1,100,10))
res_dict_lsit = []
for start in starts :
API_KEY = "AIzaSyD8oV9Vs8yjNPLAjWPe6emSZf1E865K7uY"
# SEARCH_ENGINE_ID = "e714535f954174f5d"
SEARCH_ENGINE_ID = "13581eb0fad555f88"
query = "What is car?"
url = f"https://www.googleapis.com/customsearch/v1?key={API_KEY}&cx={SEARCH_ENGINE_ID}&q={query}"
params = {'num': 10, 'start': start}
res = requests.get(url, params=params).json()
res_dict_lsit.append(res)
# print(json.dumps(res, indent=4, sort_keys=True))
# break
for res in res_dict_lsit:
try:
items = res['items']
for item in items:
title = item['title']
print(title)
print(item['link'])
except KeyError:
print(json.dumps(res, indent=4, sort_keys=True))
# wikipedia
starts = list(x for x in range(1,100,10))
res_dict_lsit = []
for start in starts :
API_KEY = "AIzaSyD8oV9Vs8yjNPLAjWPe6emSZf1E865K7uY"
SEARCH_ENGINE_ID = "c05384d3c7df544cd"
query = "What is car?"
url = f"https://www.googleapis.com/customsearch/v1?key={API_KEY}&cx={SEARCH_ENGINE_ID}&q={query}"
params = {'num': 10, 'start': start}
res = requests.get(url, params=params).json()
res_dict_lsit.append(res)
for res in res_dict_lsit:
try:
items = res['items']
for item in items:
title = item['title']
# if 'Wikipedia' not in title:
print(title)
except KeyError:
print(json.dumps(res, indent=4, sort_keys=True))
# Credo
starts = list(x for x in range(1,100,10))
res_dict_lsit = []
for start in starts :
API_KEY = "AIzaSyD8oV9Vs8yjNPLAjWPe6emSZf1E865K7uY"
SEARCH_ENGINE_ID = "5d8fe349f2e7066a7"
query = "car"
url = f"https://www.googleapis.com/customsearch/v1?key={API_KEY}&cx={SEARCH_ENGINE_ID}&q={query}"
params = {'num': 10, 'start': start}
res = requests.get(url, params=params).json()
res_dict_lsit.append(res)
# print(json.dumps(res, indent=4, sort_keys=True))
# break
for res in res_dict_lsit:
try:
items = res['items']
for item in items:
title = item['title']
print(title)
print(item['link'])
except KeyError:
print(json.dumps(res, indent=4, sort_keys=True))
import requests
URL = "https://www.britannica.com/technology/automobile"
page = requests.get(URL)
print(page.text)
! pip install requests-html
import requests_html
import csv
import asyncio
# import HTMLSession from requests_html
from requests_html import AsyncHTMLSession
if asyncio.get_event_loop().is_running(): # Only patch if needed (i.e. running in Notebook, Spyder, etc)
import nest_asyncio
nest_asyncio.apply()
urls = ["https://www.britannica.com/technology/automobile"]
async def work(s, url):
    resp = await s.get(url)
    await resp.html.arender()  # arender() is a coroutine, so it must be awaited
    return resp
async def main(urls):
    asession = AsyncHTMLSession()
    tasks = (work(asession, url) for url in urls)
    return await asyncio.gather(*tasks)
results = asyncio.run(main(urls))
import asyncio
# import HTMLSession from requests_html
from requests_html import AsyncHTMLSession
if asyncio.get_event_loop().is_running(): # Only patch if needed (i.e. running in Notebook, Spyder, etc)
import nest_asyncio
nest_asyncio.apply()
# create an HTML Session object
asession = AsyncHTMLSession()
# Use the object above to connect to needed webpage
r = await asession.get("https://www.britannica.com/technology/automobile")
# Run JavaScript code on webpage
# await r.html.arender()
from requests_html import HTMLSession
# create an HTML Session object
session = HTMLSession()
# Use the object above to connect to needed webpage
resp = session.get("https://www.britannica.com/technology/automobile")
# Run JavaScript code on webpage
# resp.html.render()
import requests
from bs4 import BeautifulSoup
URL = "https://www.britannica.com/technology/automobile"
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
soup
# getting all the paragraphs
for para in soup.find_all("p"):
print(para.get_text())
from requests_html import HTMLSession
# create an HTML Session object
session = HTMLSession()
burl = "https://www.britannica.com/technology/automobile"
# Use the object above to connect to needed webpage
resp = session.get(burl)
resp.html.absolute_links
for u in resp.html.absolute_links:
if burl in u and u != burl:
print(u)
r.html.links
r.html.absolute_links
r.html.text
! pwd
! pip install wordnik-py3
from wordnik import *
apiUrl = 'http://api.wordnik.com/v4'
apiKey = '9dmlxriqc79go7af39ogn53f500z13os36y35a1vfmw2baes8'
client = swagger.ApiClient(apiKey, apiUrl)
wordApi = WordApi.WordApi(client)
examples = wordApi.getExamples('car')
print(examples)
print(examples.examples)
for el in examples.examples:
print(el.text)
###Output
The more he thought about it, the more E.erhard sensed the time might be exactly right for starting a car company — an E. V.-car company at that.
- I got a new car, which has also become my new hobby. * it's a beautiful fast car* I have good mentors, one good one mostly, who has been there when i'm lost for answers on why it does the things i don't want it to do.
Seriously, the Exige is one super quick car * specially designed to be a track car*, and the Mustang is, well, just a run of the mill muscle car - a very good one, but still designed more for the street, than the track.
Seriously, the Exige is one super quick car * specially designed to be a track car*, and the Mustang is, well, just a run of the mill muscle car - a very good one, but still designed more for the street, than the track.
I'll meet you at the car Krystal: ok * By paint* Kristina: Now I'm looking at for a bright pink*Looking around and notices a cute boy* Kristina: Well don't he look good * Boy walks by and bumps Kristina* Boy: I'm so sorry I didn't mean to*smiles at her* Kristina: it's ok*pays for paint and goes meet her mom at the car* Krystal: why are you so smiley?
Seriously, the Exige is one super quick car * specially designed to be a track car*, and the Mustang is, well, just a run of the mill muscle car - a very good one, but still designed more for the street, than the track.
Seriously, the Exige is one super quick car * specially designed to be a track car*, and the Mustang is, well, just a run of the mill muscle car - a very good one, but still designed more for the street, than the track.
Seriously, the Exige is one super quick car * specially designed to be a track car*, and the Mustang is, well, just a run of the mill muscle car - a very good one, but still designed more for the street, than the track.
Vehicle features: ** bmw full free warranty and full free maintenance good till 50,000 miles**clean car fax and 1 owner california car** popular and hard to find jet black 2006 bmw
Vehicle features: ** bmw full free warranty and full free maintenance good till 50,000 miles**clean car fax and 1 owner california car**. well equipped with moonroof, power front seats, automatic climate control, hi-fi sound system, steptronic automatic transmission, rain sensor and auto headlight, dynamic cruise control and dark burl walnut wood trim. **test drive it today!
|
03-Keras-ModelTraining/03-Keras-ModelTraining.ipynb | ###Markdown
1 The general workflow for building, training, and testing a model
###Code
# Imports needed throughout this notebook
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Build the model
inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h1 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h1)
model = keras.Model(inputs, outputs)
keras.utils.plot_model(model, 'net001.png', show_shapes=True)
# Compile the model
model.compile(optimizer=keras.optimizers.RMSprop(),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()])
# Load the data
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
# Train the model
history = model.fit(x_train, y_train, batch_size=64, epochs=3,
validation_data=(x_val, y_val))
print('history:')
print(history.history)
# Evaluate
result = model.evaluate(x_test, y_test, batch_size=128)
print('evaluate:')
print(result)
# Predict
pred = model.predict(x_test[:2])
print('predict:')
print(pred)
###Output
predict:
[[7.9272844e-08 2.4325981e-08 1.7843488e-05 1.6991945e-04 2.2156739e-09
1.4676035e-06 3.0944847e-12 9.9977714e-01 1.4217727e-07 3.3358952e-05]
[9.3406788e-07 2.0751820e-04 9.9966800e-01 6.5498389e-05 1.3010475e-12
5.2543826e-05 2.3772755e-06 4.4947761e-08 3.0699866e-06 3.1939429e-12]]
###Markdown
2 Custom losses and metrics 1 Custom metrics Method 1: subclass the Metric class. To define a custom metric, you only need to subclass the Metric class and override the following methods: `__init__(self)`, for initialization; `update_state(self, y_true, y_pred, sample_weight=None)`, which uses the targets `y_true` and the model predictions `y_pred` to update the state variables; `result(self)`, which uses the state variables to compute the final result; and `reset_states(self)`, which reinitializes the metric's state.
###Code
# A simple example showing how to implement a CategoricalTruePositives metric, which counts the number of samples correctly classified as belonging to a given class
class CatgoricalTruePostives(keras.metrics.Metric):
def __init__(self, name='binary_true_postives', **kwargs):
super(CatgoricalTruePostives, self).__init__(name=name, **kwargs)
self.true_postives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
        y_pred = tf.argmax(y_pred, axis=-1)  # take the predicted class for each sample
y_true = tf.equal(tf.cast(y_pred, tf.int32), tf.cast(y_true, tf.int32))
y_true = tf.cast(y_true, tf.float32)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, tf.float32)
y_true = tf.multiply(sample_weight, y_true)
return self.true_postives.assign_add(tf.reduce_sum(y_true))
def result(self):
return tf.identity(self.true_postives)
def reset_states(self):
self.true_postives.assign(0.)
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[CatgoricalTruePostives()])
history = model.fit(x_train, y_train, batch_size=64, epochs=3)
print(history.history)
###Output
Epoch 1/3
782/782 [==============================] - 1s 1ms/step - loss: 0.0968 - binary_true_postives: 8129.0000
Epoch 2/3
782/782 [==============================] - 1s 1ms/step - loss: 0.0805 - binary_true_postives: 7928.0000
Epoch 3/3
782/782 [==============================] - 1s 1ms/step - loss: 0.0689 - binary_true_postives: 8266.0000
{'loss': [0.09683869779109955, 0.08049371838569641, 0.06889117509126663], 'binary_true_postives': [8129.0, 7928.0, 8266.0]}
###Markdown
Method 2: add the metric to track by defining a network layer
###Code
# A metric to track can also be added by defining a network layer
class MetricLoggingLayer(layers.Layer):
def call(self, inputs):
self.add_metric(keras.backend.std(inputs),
name='std_of_activation',
aggregation='mean')
return inputs
inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h1 = MetricLoggingLayer()(h1)
h1 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h1)
model = keras.Model(inputs, outputs)
# Building the network this way is equivalent to:
# inputs = keras.Input(shape=(784,), name='mnist_input')
# h1 = layers.Dense(64, activation='relu')(inputs)
# h2 = layers.Dense(64, activation='relu')(h1)
# outputs = layers.Dense(10, activation='softmax')(h2)
# model = keras.Model(inputs, outputs)
# model.add_metric(keras.backend.std(inputs),
# name='std_of_activation',
# aggregation='mean')
keras.utils.plot_model(model, 'net002.png', show_shapes=True)
model.compile(optimizer=keras.optimizers.RMSprop(),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()])
history = model.fit(x_train, y_train, batch_size=32, epochs=1)
print(history.history)
###Output
1563/1563 [==============================] - 2s 1ms/step - loss: 0.2984 - sparse_categorical_accuracy: 0.9118 - std_of_activation: 0.9533
{'loss': [0.2983883321285248], 'sparse_categorical_accuracy': [0.9118000268936157], 'std_of_activation': [0.9533095359802246]}
###Markdown
2 Custom losses
###Code
# Add a network loss by defining a network layer
class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(tf.reduce_sum(inputs) * 0.1)
return inputs
inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h1 = ActivityRegularizationLayer()(h1)
h1 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h1)
model = keras.Model(inputs, outputs)
# Building the network this way is equivalent to:
# inputs = keras.Input(shape=(784,), name='mnist_input')
# h1 = layers.Dense(64, activation='relu')(inputs)
# h2 = layers.Dense(64, activation='relu')(h1)
# outputs = layers.Dense(10, activation='softmax')(h2)
# model = keras.Model(inputs, outputs)
# model.add_loss(tf.reduce_sum(h1)*0.1)
keras.utils.plot_model(model, 'net003.png', show_shapes=True)
model.compile(optimizer=keras.optimizers.RMSprop(),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()])
history = model.fit(x_train, y_train, batch_size=32, epochs=1)
print(history.history)
###Output
1563/1563 [==============================] - 2s 1ms/step - loss: 2.3640 - sparse_categorical_accuracy: 0.1129
{'loss': [2.3640332221984863], 'sparse_categorical_accuracy': [0.1129399985074997]}
###Markdown
3 Building input data with tf.data
###Code
def get_compiled_model():
inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h2 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h2)
model = keras.Model(inputs, outputs)
model.compile(optimizer=keras.optimizers.RMSprop(),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()])
return model
model = get_compiled_model()
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
# model.fit(train_dataset, epochs=3)
# steps_per_epoch: train only this many steps per epoch
# validation_steps: how many steps to run on each validation pass
history = model.fit(train_dataset, epochs=3, steps_per_epoch=100,
validation_data=val_dataset, validation_steps=3)
print(history.history)
###Output
Epoch 1/3
100/100 [==============================] - 0s 2ms/step - loss: 0.7992 - sparse_categorical_accuracy: 0.7944 - val_loss: 0.3786 - val_sparse_categorical_accuracy: 0.8906
Epoch 2/3
100/100 [==============================] - 0s 1ms/step - loss: 0.3818 - sparse_categorical_accuracy: 0.8919 - val_loss: 0.3215 - val_sparse_categorical_accuracy: 0.9010
Epoch 3/3
100/100 [==============================] - 0s 1ms/step - loss: 0.3232 - sparse_categorical_accuracy: 0.9069 - val_loss: 0.2437 - val_sparse_categorical_accuracy: 0.9167
{'loss': [0.7991724610328674, 0.3817708492279053, 0.3231588304042816], 'sparse_categorical_accuracy': [0.7943750023841858, 0.8918750286102295, 0.9068750143051147], 'val_loss': [0.3785804510116577, 0.32150909304618835, 0.24366529285907745], 'val_sparse_categorical_accuracy': [0.890625, 0.9010416865348816, 0.9166666865348816]}
###Markdown
4 Sample weights and class weights. A "sample weight" array is an array of numbers that specify how much weight each sample in a batch should have when computing the total loss. It is commonly used in imbalanced classification problems (the idea is to give more weight to rarely-seen classes). When the weights used are ones and zeros, the array can be used as a mask for the loss function (entirely discarding certain samples' contribution to the total loss). A "class weight" dict is a more specific instance of the same concept: it maps class indices to the sample weight that should be used for samples belonging to that class. For example, if class "0" is represented half as often as class "1" in your data, you could use class_weight = {0: 1., 1: 0.5}.
###Code
# Increase the weight of class 5
import numpy as np
# Class weights: here, samples of class 5 get twice the weight of the other classes
# model = get_compiled_model()
# class_weight = {i:1.0 for i in range(10)}
# class_weight[5] = 2.0
# print(class_weight)
# history = model.fit(x_train, y_train,
# class_weight=class_weight,
# batch_size=64,
# epochs=4)
# Sample weights: same effect as above, just expressed differently
# model = get_compiled_model()
# sample_weight = np.ones(shape=(len(y_train),))
# sample_weight[y_train == 5] = 2.0
# history = model.fit(x_train, y_train,
# sample_weight=sample_weight,
# batch_size=64,
# epochs=4)
# With tf.data datasets
model = get_compiled_model()
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train,
sample_weight))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Apply weights to the validation set
# val_weight = np.ones(shape=(len(y_val),))
# val_weight[y_val == 5] = 2.0
# val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val,
# val_weight))
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
history = model.fit(train_dataset, epochs=3,
validation_data=val_dataset, validation_steps=3)
print(history.history)
# With tf.data datasets, weighting the validation set as well
model = get_compiled_model()
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train,
sample_weight))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
val_weight = np.ones(shape=(len(y_val),))
val_weight[y_val == 5] = 2.0
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val,
val_weight))
val_dataset = val_dataset.batch(64)
history = model.fit(train_dataset, epochs=3,
validation_data=val_dataset, validation_steps=3)
print(history.history)
###Output
Epoch 1/3
782/782 [==============================] - 1s 2ms/step - loss: 0.3666 - sparse_categorical_accuracy: 0.9036 - val_loss: 0.2044 - val_sparse_categorical_accuracy: 0.9479
Epoch 2/3
782/782 [==============================] - 1s 2ms/step - loss: 0.1791 - sparse_categorical_accuracy: 0.9505 - val_loss: 0.1325 - val_sparse_categorical_accuracy: 0.9583
Epoch 3/3
782/782 [==============================] - 1s 2ms/step - loss: 0.1330 - sparse_categorical_accuracy: 0.9629 - val_loss: 0.0920 - val_sparse_categorical_accuracy: 0.9844
{'loss': [0.3665533661842346, 0.17910617589950562, 0.132967010140419], 'sparse_categorical_accuracy': [0.9036399722099304, 0.9504799842834473, 0.96288001537323], 'val_loss': [0.2044307440519333, 0.1324818879365921, 0.09196767956018448], 'val_sparse_categorical_accuracy': [0.9479166865348816, 0.9583333134651184, 0.984375]}
###Markdown
Conclusion: after applying weights to the validation set, the loss is theoretically expected to rise, but at the same time the accuracy also improves compared to before. 5 Multi-input and multi-output models
###Code
image_input = keras.Input(shape=(32, 32, 3), name='img_input')
timeseries_input = keras.Input(shape=(None, 10), name='ts_input')
x1 = layers.Conv2D(3, 3)(image_input)
x1 = layers.GlobalMaxPooling2D()(x1)
x2 = layers.Conv1D(3, 3)(timeseries_input)
x2 = layers.GlobalMaxPooling1D()(x2)
x = layers.concatenate([x1, x2])
score_output = layers.Dense(1, name='score_output')(x)
class_output = layers.Dense(5, activation='softmax', name='class_output')(x)
model = keras.Model(inputs=[image_input, timeseries_input],
outputs=[score_output, class_output])
keras.utils.plot_model(model, 'multi_input_output_model.png'
, show_shapes=True)
# You can specify different losses and metrics for the model
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy()])
# You can also specify weights for the losses
# model.compile(
# optimizer=keras.optimizers.RMSprop(1e-3),
# loss={'score_output': keras.losses.MeanSquaredError(),
# 'class_output': keras.losses.CategoricalCrossentropy()},
# metrics={'score_output': [keras.metrics.MeanAbsolutePercentageError(),
# keras.metrics.MeanAbsoluteError()],
# 'class_output': [keras.metrics.CategoricalAccuracy()]},
# loss_weight={'score_output': 2., 'class_output': 1.})
# You can zero out a loss you don't want to propagate (here by setting it to None)
# model.compile(
# optimizer=keras.optimizers.RMSprop(1e-3),
# loss=[None, keras.losses.CategoricalCrossentropy()])
# Or dict loss version
# model.compile(
# optimizer=keras.optimizers.RMSprop(1e-3),
# loss={'class_output': keras.losses.CategoricalCrossentropy()})
###Output
_____no_output_____
###Markdown
6 Using callbacks. Callbacks in Keras are objects that are called at different points during training (at the start of an epoch, at the end of a batch, at the end of an epoch, etc.) and can be used to implement behaviors such as:* running validation at different points during training (beyond the built-in per-epoch validation)* checkpointing the model periodically or once it exceeds a certain accuracy threshold* changing the model's learning rate when training seems to plateau* fine-tuning the top layers when training seems to plateau* sending email or instant-message notifications when training ends or a certain performance threshold is exceeded, and so on.**Available built-in callbacks include*** ModelCheckpoint: periodically save the model.* EarlyStopping: stop training when it no longer improves the validation metrics.* TensorBoard: periodically write model logs that can be displayed in TensorBoard (see "Visualization" for more details).* CSVLogger: stream loss and metric data to a CSV file.* and more 1 Using callbacks
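The CSVLogger callback listed above is not demonstrated in the cells below, so here is a minimal sketch of how it could be wired in (assuming the `get_compiled_model` helper and the MNIST arrays defined earlier in this notebook; the filename `training_log.csv` is just an illustrative choice):
```python
# Minimal sketch: stream per-epoch loss and metric values to a CSV file.
model = get_compiled_model()
csv_logger = keras.callbacks.CSVLogger('training_log.csv', append=False)
model.fit(x_train, y_train,
          epochs=3,
          batch_size=64,
          callbacks=[csv_logger],
          validation_split=0.2)
```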
###Code
# Early stopping
model = get_compiled_model()
callbacks = [
keras.callbacks.EarlyStopping(
        # the metric to monitor for improvement
monitor='val_loss',
        # minimum change that counts as an improvement
min_delta=1e-2,
        # stop after 2 epochs without improvement
patience=2,
verbose=1)
]
model.fit(x_train, y_train,
epochs=20,
batch_size=64,
callbacks=callbacks,
validation_split=0.2)
# Model checkpoint callback
model = get_compiled_model()
check_callback = keras.callbacks.ModelCheckpoint(
filepath='mymodel_{epoch}.h5',
save_best_only=True,
monitor='val_loss',
verbose=1
)
model.fit(x_train, y_train,
epochs=3,
batch_size=64,
callbacks=[check_callback],
validation_split=0.2)
# Dynamically adjust the learning rate
initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=10000,
decay_rate=0.96,
staircase=True
)
optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)
inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h2 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h2)
model = keras.Model(inputs, outputs)
model.compile(optimizer=optimizer,
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()])
# Use TensorBoard
tensorboard_cbk = keras.callbacks.TensorBoard(log_dir='./logs')
model.fit(x_train, y_train,
epochs=5,
batch_size=64,
callbacks=[tensorboard_cbk],
validation_split=0.2)
###Output
Epoch 1/5
625/625 [==============================] - 1s 2ms/step - loss: 2.7099 - sparse_categorical_accuracy: 0.3422 - val_loss: 1.7445 - val_sparse_categorical_accuracy: 0.3856
Epoch 2/5
625/625 [==============================] - 1s 2ms/step - loss: 1.6877 - sparse_categorical_accuracy: 0.3144 - val_loss: 1.6926 - val_sparse_categorical_accuracy: 0.2946
Epoch 3/5
625/625 [==============================] - 1s 2ms/step - loss: 1.6924 - sparse_categorical_accuracy: 0.2828 - val_loss: 1.8450 - val_sparse_categorical_accuracy: 0.1862
Epoch 4/5
625/625 [==============================] - 1s 2ms/step - loss: 1.8441 - sparse_categorical_accuracy: 0.1968 - val_loss: 1.8945 - val_sparse_categorical_accuracy: 0.2042
Epoch 5/5
625/625 [==============================] - 1s 2ms/step - loss: 1.8483 - sparse_categorical_accuracy: 0.1980 - val_loss: 1.8482 - val_sparse_categorical_accuracy: 0.2048
###Markdown
2 Custom callback functions
###Code
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs):
self.losses = []
    def on_epoch_end(self, epoch, logs):
self.losses.append(logs.get('loss'))
print('\nloss:',self.losses[-1])
model = get_compiled_model()
callbacks = [
LossHistory()
]
model.fit(x_train, y_train,
epochs=3,
batch_size=64,
callbacks=callbacks,
validation_split=0.2)
###Output
Epoch 1/3
591/625 [===========================>..] - ETA: 0s - loss: 0.3811 - sparse_categorical_accuracy: 0.8928
loss: 0.37221866846084595
625/625 [==============================] - 1s 2ms/step - loss: 0.3722 - sparse_categorical_accuracy: 0.8952 - val_loss: 0.2246 - val_sparse_categorical_accuracy: 0.9319
Epoch 2/3
615/625 [============================>.] - ETA: 0s - loss: 0.1713 - sparse_categorical_accuracy: 0.9482
loss: 0.17089442908763885
625/625 [==============================] - 1s 2ms/step - loss: 0.1709 - sparse_categorical_accuracy: 0.9483 - val_loss: 0.1755 - val_sparse_categorical_accuracy: 0.9463
Epoch 3/3
605/625 [============================>.] - ETA: 0s - loss: 0.1267 - sparse_categorical_accuracy: 0.9615
loss: 0.12589748203754425
625/625 [==============================] - 1s 1ms/step - loss: 0.1259 - sparse_categorical_accuracy: 0.9617 - val_loss: 0.1666 - val_sparse_categorical_accuracy: 0.9506
###Markdown
7 Building custom training and validation loops. Training and validating
###Code
# Train and validate
# Build the model
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# SGD optimizer
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Classification loss function
loss_fn = keras.losses.SparseCategoricalCrossentropy()
# Set up the metrics
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
# Prepare the training data
batch_size = 64
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
# Prepare the validation data
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
# Training loop
for epoch in range(3):
print('Start of epoch %d' % (epoch,))
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
with tf.GradientTape() as tape:
logits = model(x_batch_train)
loss_value = loss_fn(y_batch_train, logits)
grads = tape.gradient(loss_value, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
        # Update the training metric
train_acc_metric(y_batch_train, logits)
        # Logging
if step % 200 == 0:
print('Training loss (for one batch) at step %s: %s' % (step, float(loss_value)))
print('Seen so far: %s samples' % ((step + 1) * 64))
    # Report the value of the metric at the end of the epoch
train_acc = train_acc_metric.result()
print('Training acc over epoch: %s' % (float(train_acc),))
    # Reset the metric
train_acc_metric.reset_states()
    # Run validation with the model
for x_batch_val, y_batch_val in val_dataset:
val_logits = model(x_batch_val)
        # Update the validation metric
val_acc_metric(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_states()
print('Validation acc: %s' % (float(val_acc),))
## Adding your own losses; model.losses only exposes the losses added during the most recent forward pass
class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(1e-2 * tf.reduce_sum(inputs))
return inputs
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
logits = model(x_train[:64])
print(model.losses)
logits = model(x_train[:64])
logits = model(x_train[64: 128])
logits = model(x_train[128: 192])
print(model.losses)
# Include the extra losses in the gradient computation
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
for epoch in range(3):
print('Start of epoch %d' % (epoch,))
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
with tf.GradientTape() as tape:
logits = model(x_batch_train)
loss_value = loss_fn(y_batch_train, logits)
            # Add the extra losses
loss_value += sum(model.losses)
grads = tape.gradient(loss_value, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
        # Log progress every 200 batches.
if step % 200 == 0:
print('Training loss (for one batch) at step %s: %s' % (step, float(loss_value)))
print('Seen so far: %s samples' % ((step + 1) * 64))
###Output
[<tf.Tensor: shape=(), dtype=float32, numpy=6.5006733>]
[<tf.Tensor: shape=(), dtype=float32, numpy=6.547977>]
Start of epoch 0
Training loss (for one batch) at step 0: 8.892967224121094
Seen so far: 64 samples
Training loss (for one batch) at step 200: 2.541232109069824
Seen so far: 12864 samples
Training loss (for one batch) at step 400: 2.4225268363952637
Seen so far: 25664 samples
Training loss (for one batch) at step 600: 2.336982011795044
Seen so far: 38464 samples
Start of epoch 1
Training loss (for one batch) at step 0: 2.3284807205200195
Seen so far: 64 samples
Training loss (for one batch) at step 200: 2.3222813606262207
Seen so far: 12864 samples
Training loss (for one batch) at step 400: 2.338780641555786
Seen so far: 25664 samples
Training loss (for one batch) at step 600: 2.306298017501831
Seen so far: 38464 samples
Start of epoch 2
Training loss (for one batch) at step 0: 2.305915117263794
Seen so far: 64 samples
Training loss (for one batch) at step 200: 2.320936679840088
Seen so far: 12864 samples
Training loss (for one batch) at step 400: 2.3123230934143066
Seen so far: 25664 samples
Training loss (for one batch) at step 600: 2.313464879989624
Seen so far: 38464 samples
|
tutorials/W1D1_BasicsAndPytorch/W1D1_Tutorial1.ipynb | ###Markdown
Tutorial 1: PyTorch**Week 1, Day 1: Basics and PyTorch****By Neuromatch Academy**__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording__Content reviewers:__ Kelson Shilling-Scrivo, Deepak Raya, Siwei Bai__Content editors:__ Anoop Kulkarni, Spiros Chavlis__Production editors:__ Arush Tagade, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesWe have a few specific objectives for this tutorial:* Learn about PyTorch and tensors* Tensor Manipulations* Data Loading* GPUs and Cuda Tensors* Train NaiveNet* Get to know your pod* Start thinking about the course as a whole
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- Setup Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy); set global or environment variables, and load in helper functions for things like plotting. In some tutorials, you will notice that we install some dependencies even if they are preinstalled on google colab or kaggle. This happens because we have added automation to our repository through [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions).Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.If you start building your own projects on this code base, we highly recommend looking at these cells in more detail.
###Code
# @title Install dependencies
!pip install pandas --quiet
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from evaltools.airtable import AirtableForm
# @title Figure Settings
import ipywidgets as widgets
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# @title Helper Functions
atform = AirtableForm('appn7VdPRseSoMXEG','W1D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/97e94a29-0b3a-4e16-9a8d-f6838a5bd83d')
def checkExercise1(A, B, C, D):
"""
Helper function for checking exercise.
Args:
A: torch.Tensor
B: torch.Tensor
C: torch.Tensor
D: torch.Tensor
Returns:
Nothing.
"""
errors = []
# TODO better errors and error handling
if not torch.equal(A.to(int),torch.ones(20, 21).to(int)):
errors.append(f"Got: {A} \n Expected: {torch.ones(20, 21)} (shape: {torch.ones(20, 21).shape})")
  if not np.array_equal(B.numpy(), np.vander([1, 2, 3], 4)):
errors.append("B is not a tensor containing the elements of Z ")
if C.shape != (20, 21):
errors.append("C is not the correct shape ")
if not torch.equal(D, torch.arange(4, 41, step=2)):
errors.append("D does not contain the correct elements")
if errors == []:
print("All correct!")
else:
[print(e) for e in errors]
def timeFun(f, dim, iterations, device='cpu'):
t_total = 0
for _ in range(iterations):
start = time.time()
f(dim, device)
end = time.time()
t_total += end - start
print(f"time taken for {iterations} iterations of {f.__name__}({dim}): {t_total:.5f}")
###Output
_____no_output_____
###Markdown
**Important note: Google Colab users***Scratch Code Cells*If you want to quickly try out something or take a look at the data you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook.To open a new scratch cell go to *Insert* → *Scratch code cell*. Section 1: Welcome to Neuromatch Deep learning course
###Code
# @title Video 1: Welcome and History
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing
atform.add_event('Video 1: Welcome and History')
display(out)
###Output
_____no_output_____
###Markdown
This will be an intensive 3-week adventure. We will all learn Deep Learning. In a group. Groups need standards. Read our [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing).
###Code
# @title Video 2: Why DL is cool
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 2: Why DL is cool')
display(out)
###Output
_____no_output_____
###Markdown
**Describe what you hope to get out of this course in about 100 words.** --- Section 2: The Basics of PyTorch PyTorch is a Python-based scientific computing package targeted at two sets of audiences:- A replacement for NumPy to use the power of GPUs- A deep learning platform that provides significant flexibility and speed. At its core, PyTorch provides a few key features:- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.- An optimized **autograd** engine for automatically computing derivatives.- A clean, modular API for building and deploying **deep learning models**. You can find more information about PyTorch in the appendix. Section 2.1: Creating Tensors
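As a tiny, self-contained taste of the **autograd** engine mentioned above (a minimal sketch; autograd is covered in detail tomorrow):
```python
import torch

# PyTorch records operations on tensors created with requires_grad=True
# and can differentiate through them automatically.
x = torch.tensor(2.0, requires_grad=True)
y = x**2 + 3*x  # y = x^2 + 3x
y.backward()    # compute dy/dx with autograd
print(x.grad)   # tensor(7.) since dy/dx = 2x + 3 = 7 at x = 2
```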
###Code
# @title Video 3: Making Tensors
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 3: Making Tensors')
display(out)
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so. **Construct tensors directly:**---
###Code
# we can construct a tensor directly from some common python iterables,
# such as list and tuple nested iterables can also be handled as long as the
# dimensions make sense
# tensor from a list
a = torch.tensor([0, 1, 2])
# tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
###Output
_____no_output_____
###Markdown
**Some common tensor constructors:**---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1, 5)
print(f"Tensor x: {x}")
print(f"Tensor y: {y}")
print(f"Tensor z: {z}")
###Output
_____no_output_____
###Markdown
Notice that ```.empty()``` does not return zeros, but seemingly random small numbers. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence a bit faster if you are looking to just create a tensor. **Creating random tensors and tensors like other tensors:**---
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
print(f"Tensor d: {d}")
###Output
_____no_output_____
###Markdown
*Reproducibility*:
- PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA):
```python
import torch
torch.manual_seed(0)
```
- For custom operators, you might need to set the Python seed as well:
```python
import random
random.seed(0)
```
- Random number generators in other libraries:
```python
import numpy as np
np.random.seed(0)
```
Here, we define for you a function called `set_seed` that does the job for you!
###Code
def set_seed(seed=None, seed_torch=True):
"""
Function that controls randomness. NumPy and random modules must be imported.
Args:
seed : Integer
A non-negative integer that defines the random state. Default is `None`.
seed_torch : Boolean
If `True` sets the random seed for pytorch tensors, so pytorch module
must be imported. Default is `True`.
Returns:
Nothing.
"""
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
###Output
_____no_output_____
###Markdown
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
###Code
def simplefun(seed=True, my_seed=None):
if seed:
set_seed(seed=my_seed)
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
print("Tensor a: ", a)
print("Tensor b: ", b)
simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed`
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**---The ```.arange()``` and ```.linspace()``` behave how you would expect them to if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Creating TensorsBelow you will find some incomplete code. Fill in the missing code to construct the specified tensors.We want the tensors: $A:$ 20 by 21 tensor consisting of ones$B:$ a tensor with elements equal to the elements of numpy array $Z$$C:$ a tensor with the same number of elements as $A$ but with values $\sim U(0,1)$$D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive.
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
    Z (numpy.ndarray): An array of shape (3, 4)
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## from the first expression
raise NotImplementedError("Student exercise: say what they should have done")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
    Z (numpy.ndarray): An array of shape (3, 4)
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
```All correct!``` Section 2.2: Operations in PyTorch
###Code
# @title Video 4: Tensor Operators
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 4: Tensor Operators')
display(out)
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
# this only works if c and d already exist
torch.add(a, b, out=c)
# Pointwise multiplication of a and b
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overridden. The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations
###Code
x = torch.tensor([1, 2, 4, 8])
y = torch.tensor([1, 2, 3, 4])
x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation
###Output
_____no_output_____
###Markdown
**Tensor Methods** Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!). All of these operations should have similar syntax to their numpy equivalents. (Feel free to skip if you already know this!)
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
###Output
_____no_output_____
###Markdown
**Matrix Operations**The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot multiplication, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.T```. Note the lack of brackets for ```Tensor.T``` - it is an attribute, not a method. Coding Exercise 2.2 : Simple tensor operationsBelow are two expressions involving operations on matrices. $$ \textbf{A} = \begin{bmatrix}2 &4 \\5 & 7 \end{bmatrix} \begin{bmatrix} 1 &1 \\2 & 3\end{bmatrix} + \begin{bmatrix}10 & 10 \\ 12 & 1 \end{bmatrix} $$and$$ b = \begin{bmatrix} 3 \\ 5 \\ 7\end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8\end{bmatrix}$$The code block below computes these expressions using PyTorch, but it is incomplete - fill in the missing lines.
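Before filling in the exercise, here is a minimal sketch of the operators described above on small example tensors:
```python
import torch

A = torch.tensor([[1., 2.], [3., 4.]])
B = torch.tensor([[0., 1.], [1., 0.]])

print(A @ B)            # matrix multiplication, equivalent to torch.matmul(A, B)
print(torch.matmul(A, B))

v = torch.tensor([1., 2., 3.])
w = torch.tensor([4., 5., 6.])
print(torch.dot(v, w))  # dot product of two 1D tensors -> tensor(32.)

print(A.t())            # transpose of a 2D tensor (method)
print(A.T)              # transpose as an attribute
```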
###Code
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
################################################
  ## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
################################################
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = ...
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# A = simple_operations(a1, a2, a3)
# print(A)
# to_remove solution
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = a1 @ a2 + a3
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
A = simple_operations(a1, a2, a3)
print(A)
###Output
_____no_output_____
###Markdown
```tensor([[20, 24], [31, 27]])```
###Code
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
###############################################
  ## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
# Use torch.dot() to compute the dot product of two tensors
product = ...
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# b = dot_product(b1, b2)
# print(b)
# to_remove solution
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
# Use torch.dot() to compute the dot product of two tensors
product = torch.dot(b1, b2)
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
b = dot_product(b1, b2)
print(b)
###Output
_____no_output_____
###Markdown
```tensor(82)``` Section 2.3 Manipulating Tensors in Pytorch
###Code
# @title Video 5: Tensor Indexing
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 5: Tensor Indexing')
display(out)
###Output
_____no_output_____
###Markdown
**Indexing**Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0, and ranges include the first element of the range but exclude the last. We can access elements according to their relative position to the end of the list by using negative indices. Indexing is also referred to as slicing. For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as numpy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
###Output
_____no_output_____
###Markdown
**Flatten and reshape**There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")
# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")
# and back to 2D
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
###Output
_____no_output_____
###Markdown
You will also see the ```.view()``` method used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()```. The documentation can be found in the appendix. **Squeezing tensors**When processing batches of data, you will quite often be left with singleton dimensions, e.g., [1,10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there... In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.
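Here is a minimal sketch of that subtle difference between ```.view()``` and ```.reshape()```: ```.view()``` requires the underlying memory to be contiguous, while ```.reshape()``` copies the data when it has to.
```python
import torch

x = torch.arange(6).reshape(2, 3)
t = x.t()                      # the transpose is a non-contiguous view of x
print(t.reshape(6))            # works: reshape copies when it has to
try:
  t.view(6)                    # fails: view requires contiguous memory
except RuntimeError as e:
  print(f"view failed: {e}")
print(t.contiguous().view(6))  # making the tensor contiguous first works
```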
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print(f"x[0]: {x[0]}")
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, x[0] gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")
# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")
# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
###Output
_____no_output_____
###Markdown
**Permutation**Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension i.e. [48x64x3]. To get around this we can use ```.permute()```
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
###Output
_____no_output_____
###Markdown
You may also see ```.transpose()``` used. This works in a similar way as permute, but can only swap two dimensions at once. **Concatenation** In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor’s axis-0 length ( 6 ) is the sum of the two input tensors’ axis-0 lengths ( 3+3 ); while the second output tensor’s axis-1 length ( 8 ) is the sum of the two input tensors’ axis-1 lengths ( 4+4 ).
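As a quick sketch of ```.transpose()``` (using the same image-shaped tensor as in the permute example) before moving on to concatenation:
```python
import torch

x = torch.rand(3, 48, 64)
# transpose swaps exactly two dimensions; here dims 0 and 2
y = x.transpose(0, 2)
print(y.shape)  # torch.Size([64, 48, 3])
```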
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
# concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by columns: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
###Markdown
**Conversion to Other Python Objects**Converting to a NumPy array, or vice versa, is easy. Note that ```torch.tensor()``` always copies the data, so the converted result does not share memory with its source (whereas ```Tensor.numpy()``` on a CPU tensor does share memory with the tensor). Copying is actually quite important: when you perform operations on the CPU or on GPUs, you do not want to halt computation, waiting to see whether the NumPy package of Python might want to be doing something else with the same chunk of memory. When converting to a numpy array, the information being tracked by the tensor will be lost, i.e., the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the item function or Python’s built-in functions.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.3: Manipulating TensorsUsing a combination of the methods discussed above, complete the functions below. **Function A** This function takes in two 2D tensors $A$ and $B$ and returns the column sum of A multiplied by the sum of all the elements of $B$ i.e. a scalar, e.g.,:$ A = \begin{bmatrix}1 & 1 \\1 & 1 \end{bmatrix} \,$and$ B = \begin{bmatrix}1 & 2 & 3\\1 & 2 & 3 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix} 2 & 2 \\\end{bmatrix} \cdot 12 = \begin{bmatrix}24 & 24\\\end{bmatrix}$**Function B** This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.,:$ C = \begin{bmatrix}2 & 3 \\-1 & 10 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix}0 & 2 \\1 & 3 \\2 & -1 \\3 & 10\end{bmatrix}$**Hint:** pay close attention to singleton dimensions**Function C**This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $D$-shaped $E$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors, e.g.,:$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix} \,$and $ E = \begin{bmatrix}2 & 3 & 0 & 2 \\\end{bmatrix} \, $so$ \, Out = \begin{bmatrix}3 & 2 \\-1 & 5 \end{bmatrix}$$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$and$ \, E = \begin{bmatrix}2 & 3 & 0 \\\end{bmatrix} \,$so$ \, Out = \begin{bmatrix}1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix}$**Hint:** `torch.numel()` is an easy way of finding the number of elements in a tensor
###Code
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
  `my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
################################################
## TODO for students: complete functionA
raise NotImplementedError("Student exercise: complete function A")
################################################
# TODO multiplication the sum of the tensors
output = ...
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function B")
################################################
# TODO flatten the tensor `my_tensor`
my_tensor = ...
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor2`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
  ## TODO for students: complete functionC
raise NotImplementedError("Student exercise: complete function C")
################################################
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if ...:
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
my_tensor1 = ...
my_tensor2 = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
# to_remove solution
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
# Multiply the column sum of `my_tensor1` by the total sum of `my_tensor2`
output = my_tensor1.sum(axis=0) * my_tensor2.sum()
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO flatten the tensor `my_tensor`
my_tensor = my_tensor.flatten()
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = torch.arange(0, len(my_tensor))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), my_tensor.unsqueeze(1)], axis=1)
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if torch.numel(my_tensor1) == torch.numel(my_tensor2):
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = my_tensor2.reshape(my_tensor1.shape)
# TODO sum the two tensors
output = my_tensor1 + my_tensor2
else:
# TODO flatten both tensors
my_tensor1 = my_tensor1.reshape(1, -1)
my_tensor2 = my_tensor2.reshape(1, -1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([my_tensor1, my_tensor2], axis=1).squeeze()
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
###Output
_____no_output_____
###Markdown
```
tensor([24, 24])
tensor([[ 0,  2],
        [ 1,  3],
        [ 2, -1],
        [ 3, 10]])
tensor([[ 3,  2],
        [-1,  5]])
tensor([ 1, -1, -1,  3,  2,  3,  0])
``` Section 2.4: GPUs
###Code
# @title Video 6: GPU vs CPU
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 6: GPU vs CPU')
display(out)
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
Colab notebooks do not have access to a GPU by default. In order to start using GPUs, we need to request one: go to the Runtime tab at the top of the page, follow Runtime -> Change runtime type, and select "GPU" from the Hardware Accelerator dropdown list. Then we can start playing with sending tensors to GPUs. Once you have done this, your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell. (For more information on the GPU usage policy, see the appendix.) **Now we have a GPU** The cell below should return True.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides a layer of abstraction on top of it and allows us to launch CUDA kernels using pure Python. In short, we get the power of parallelizing our tensor computations on GPUs, whilst only writing (relatively) simple Python!Here, we define the function `set_device`, which returns the device used in the notebook, i.e., `cpu` or `cuda`. Unless otherwise specified, we use this function at the top of every tutorial, and we store the result in a variable:

```python
DEVICE = set_device()
```

Let's define the function using the PyTorch package `torch.cuda`, which is lazily initialized, so we can always import it and use `is_available()` to determine if our system supports CUDA.
###Code
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
###Output
_____no_output_____
###Markdown
Let's make some CUDA tensors!
###Code
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between cpu tensors and cuda tensors**Note that the type of the tensor changed after calling ```.to()```. What happens if we try to perform operations on tensors that live on different devices?
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
# Uncomment the following line and run this cell
# z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the `.to()` method as before, or the `.cpu()` and `.cuda()` methods. Note that `.cuda()` will throw an error if CUDA is not enabled on your machine. Generally in this course, all deep learning is done on the GPU, while other computation is done on the CPU, so sometimes we have to pass tensors back and forth between devices, as you'll see us do below.
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)
# moving to cpu
x = x.to("cpu") # alternatively, you can use x = x.cpu()
print(x + y)
# moving to gpu
y = y.to(DEVICE) # alternatively, you can use y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.4: Just how much faster are GPUs?Below is a simple function, `simpleFun`. Complete it so that it performs the same operations as the original CPU-only version, but entirely on the device passed in. We will use the helper function `timeFun(f, dim, iterations, device)` to time it; a sketch of the computation follows below.
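For reference, the CPU-only computation being timed looks roughly like this (a sketch reconstructed from the exercise; all tensors are `dim × dim`):

```python
x = torch.rand(dim, dim)      # uniform random matrix
y = torch.rand_like(x)        # another random matrix with the same shape
z = 2 * torch.ones(dim, dim)  # matrix filled with twos
x = x * y                     # elementwise multiplication
x = x @ z                     # matrix multiplication
```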
###Code
dim = 10000
iterations = 1
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda:0"
Returns:
Nothing.
"""
###############################################
## TODO for students: recreate the above function, but
## ensure all computation happens on the GPU
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
###############################################
x = ...
y = ...
z = ...
x = ...
y = ...
del x
del y
del z
## TODO: Implement the function above and uncomment the following lines to test your code
# timeFun(f=simpleFun, dim=dim, iterations=iterations)
# timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
# to_remove solution
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
x = torch.rand(dim, dim).to(device)
y = torch.rand_like(x).to(device)
z = 2*torch.ones(dim, dim).to(device)
x = x * y
x = x @ z
del x
del y
del z
## TODO: Implement the function above and uncomment the following lines to test your code
timeFun(f=simpleFun, dim=dim, iterations=iterations)
timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
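# A quick follow-up experiment for the Discuss prompt below (the values here
# are just examples): shrink `dim` and raise `iterations`, e.g.,
# timeFun(f=simpleFun, dim=100, iterations=100)
# timeFun(f=simpleFun, dim=100, iterations=100, device=DEVICE)
# For small tensors, host-to-device transfers and kernel-launch overhead
# can outweigh the speedup from parallel computation.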
###Output
_____no_output_____
###Markdown
Sample output (depends on your hardware)

```
time taken for 1 iterations of simpleFun(10000, cpu): 28.50481
time taken for 1 iterations of simpleFun(10000, cuda): 0.91102
```

**Discuss!**Try reducing the dimensions of the tensors and increasing the iterations. You can get to a point where the CPU-only function is faster than the GPU function. Why might this be? Section 2.5: Datasets and Dataloaders
###Code
# @title Video 7: Getting Data
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 7: Getting Data')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
###Code
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
###Output
_____no_output_____
###Markdown
**Datasets**The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print(f"Number of samples: {len(cifar10_data)}")
print(f"Class names: {cifar10_data.classes}")
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
# Choose a random sample
random.seed(2021)
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]
print(f"Label: {cifar10_data.classes[label]}")
print(f"Image size: {image.shape}")
###Output
_____no_output_____
###Markdown
Color images are modeled as 3-dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimension is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W. Coding Exercise 2.5: Display an image from the datasetLet's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - $H \times W \times C$. You need to reorder the dimensions of the tensor using the `permute` method of the tensor. `Tensor.permute(*dims)` rearranges the original tensor according to the desired ordering and returns a new tensor with the dimensions permuted. The number of elements in the returned tensor remains the same as in the original.**Code hint:**

```python
# create a tensor of size 2 x 4
input_var = torch.randn(2, 4)
# print its size and the tensor
print(input_var.size())
print(input_var)
# dimensions permuted
input_var = input_var.permute(1, 0)
# print its size and the permuted tensor
print(input_var.size())
print(input_var)
```
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
# plt.show()
# to_remove solution
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1, 2, 0))
plt.show()
#@title Video 8: Train and Test
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 8: Train and Test')
display(out)
###Output
_____no_output_____
###Markdown
**Training and Test Datasets**When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, we will not use the two datasets separately today; this topic will be addressed in the coming days.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
# @title Video 9: Data Augmentation - Transformations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 9: Data Augmentation - Transformations')
display(out)
###Output
_____no_output_____
###Markdown
**Dataloader**Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
*Reproducibility:* `DataLoader` will reseed its workers following PyTorch's randomness-in-multi-process-data-loading algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:

```python
def seed_worker(worker_id):
    worker_seed = torch.initial_seed() % 2**32
    numpy.random.seed(worker_seed)
    random.seed(worker_seed)


g_seed = torch.Generator()
g_seed.manual_seed(my_seed)

DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    worker_init_fn=seed_worker,
    generator=g_seed
    )
```

**Note:** For `seed_worker` to have an effect, `num_workers` should be 2 or more. We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter`, and then we can query the next batch using the function `next`.We can now see that we have a 4D tensor. This is because we have 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
plt.show()
###Output
_____no_output_____
###Markdown
**Transformations**Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation, etc. There are many predefined transformations in the `torchvision.transforms` package, and you can also combine them using the `Compose` transform; a minimal sketch is shown below. Check out the [PyTorch documentation](https://pytorch.org/vision/stable/transforms.html) for details. Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale imagesThe goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility.
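A minimal `Compose` sketch (the two transforms here are illustrative examples, not the ones this exercise needs):

```python
from torchvision.transforms import Compose, ToTensor, Normalize

# transforms are applied in order, left to right
example_transform = Compose([
    ToTensor(),                                   # PIL image -> float tensor in [0, 1]
    Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # per-channel normalization
])
```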
###Code
def my_data_load():
###############################################
## TODO for students: load the CIFAR10 dataset,
## converting the images to grayscale tensors
raise NotImplementedError("Student exercise: fill in the missing code to load the data")
###############################################
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(...,
transform=...)
# Display a random grayscale image
image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# data = my_data_load()
# to_remove solution
def my_data_load():
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(root="data", download=True,
transform=Compose([ToTensor(), Grayscale()]))
# Display a random grayscale image
image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
data = my_data_load()
###Output
_____no_output_____
###Markdown
--- Section 3: Neural NetworksNow it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:- Creating a simple neural network model- Training the network- Visualizing the results of the network- Tweaking the network
###Code
# @title Video 10: CSV Files
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 10: CSV Files')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Data LoadingFirst we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
# @title Generate sample data
# @markdown we used `scikit-learn` module
from sklearn.datasets import make_moons
# Create a dataset of 256 points with a little noise
X, y = make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")
# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
plt.show()
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Initialize the device variable
DEVICE = set_device()
# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)
# Upload the tensor to the device
X = X.to(DEVICE)
print(f"Size X:{X.shape}")
# Convert the labels to a long integer tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(DEVICE)
print(f"Size y:{y.shape}")
###Output
_____no_output_____
###Markdown
Section 3.2: Create a Simple Neural Network
###Code
# @title Video 11: Generating the Neural Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 11: Generating the Neural Network')
display(out)
###Output
_____no_output_____
###Markdown
For this example we want to have a simple neural network consisting of 3 layers:- 1 input layer of size 2 (our points have 2 coordinates)- 1 hidden layer of size 16 (you can play with different numbers here)- 1 output layer of size 2 (we want to have the scores for the two classes)During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.**Programming the Network**PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:`__init__`In the `__init__` method you need to define the structure of your network. Here you will specify what layers the network will consist of, what activation functions will be used etc.`forward`All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it.`predict`This is not an obligatory method of a neural network module, but it is a good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score.`train`This is also not an obligatory method, but it is a good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook.> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`.
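A minimal sketch of that equivalence (assuming `net` is an instance of an `nn.Module` subclass and `x` is a suitable input tensor):

```python
out1 = net(x)          # preferred: __call__ runs PyTorch's hooks, then forward()
out2 = net.forward(x)  # direct call; same result here, but skips the hooks
print(torch.equal(out1, out2))  # True for a deterministic forward pass
```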
###Code
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
nn.ReLU(), # Activation function (ReLU) is a non-linearity which is widely used because it reduces computation. The function returns 0 if it receives any
# negative input, but for any positive value x, it returns that value back.
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Choose the most likely label predicted by the network
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
def train(self, X, y):
pass
###Output
_____no_output_____
###Markdown
**Check that your network works**Create an instance of your model and visualize it
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(DEVICE)
# Print the structure of the network
print(model)
###Output
_____no_output_____
###Markdown
Coding Exercise 3.2: Classify some samplesNow let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. The goal here is just to get some experience with the data structures that are passed to the forward and predict methods and their results.
###Code
## Get the samples
# X_samples = ...
# print("Sample input:\n", X_samples)
## Do a forward pass of the network
# output = ...
# print("\nNetwork output:\n", output)
## Predict the label of each point
# y_predicted = ...
# print("\nPredicted labels:\n", y_predicted)
# to_remove solution
## Get the samples
X_samples = X[0:5]
print("Sample input:\n", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("\nNetwork output:\n", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("\nPredicted labels:\n", y_predicted)
###Output
_____no_output_____
###Markdown
```
Sample input:
 tensor([[ 0.9066,  0.5052],
        [-0.2024,  1.1226],
        [ 1.0685,  0.2809],
        [ 0.6720,  0.5097],
        [ 0.8548,  0.5122]], device='cuda:0')

Network output:
 tensor([[ 0.1543, -0.8018],
        [ 2.2077, -2.9859],
        [-0.5745, -0.0195],
        [ 0.1924, -0.8367],
        [ 0.1818, -0.8301]], device='cuda:0', grad_fn=<AddmmBackward>)

Predicted labels:
 tensor([0, 0, 1, 0, 0], device='cuda:0')
``` Section 3.3: Train Your Neural Network
###Code
# @title Video 12: Train the Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 12: Train the Network')
display(out)
###Output
_____no_output_____
###Markdown
Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet - we will cover training in much more detail in the coming days. For now, the goal is just to see your network in action!You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we will implement it as a function outside of the class in order to have it in a separate cell.
###Code
# @title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
# We'll use the whole dataset during the training instead of using batches
# in order to keep the code simple for now.
y_logits = model.forward(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(model, X, y, DEVICE)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Create a new network instance a train it
model = NaiveNet().to(DEVICE)
losses = train(model, X, y)
###Output
_____no_output_____
###Markdown
**Plot the loss during training**Plot the loss during the training to see how it reduces and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
# @title Visualize the training process
# @markdown ### Execute this cell!
!pip install imageio --quiet
!pip install pathlib --quiet
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(10):
filename = "frames/0"+str(i)+"000.png"
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
# @title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 13: Play with it')
display(out)
###Output
_____no_output_____
###Markdown
Exercise 3.3: Tweak your NetworkYou can now play around with the network a little bit to get a feeling for what different parameters are doing. Here are some ideas for what you could try:- Increase or decrease the number of epochs for training- Increase or decrease the size of the hidden layer- Add one additional hidden layer (see the sketch below)Can you get the network to better fit the data?
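For the third idea, a sketch of what a deeper network could look like (`DeeperNet` and the layer sizes are our own example choices, not part of the tutorial):

```python
class DeeperNet(nn.Module):
    """NaiveNet with a wider, extra hidden layer (sizes are just examples)."""
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(2, 32),   # input -> first hidden layer
            nn.ReLU(),
            nn.Linear(32, 32),  # first hidden -> second hidden layer
            nn.ReLU(),
            nn.Linear(32, 2),   # second hidden -> output layer
        )

    def forward(self, x):
        return self.layers(x)

    def predict(self, x):
        # needed by `plot_decision_boundary` during training
        return torch.argmax(self.forward(x), 1)

# model = DeeperNet().to(DEVICE)
# losses = train(model, X, y)
```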
###Code
# @title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 14: XOR Widget')
display(out)
###Output
_____no_output_____
###Markdown
The exclusive OR (XOR) logical operation gives a true (`1`) output when the number of true inputs is odd. That is, a true output results if one, and only one, of the inputs to the gate is true. If both inputs are false (`0`) or both are true (`1`), a false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false. In the case of two inputs ($X$ and $Y$) the following truth table applies:

\begin{array}{ccc}
X & Y & \text{XOR} \\
\hline
0 & 0 & 0 \\
0 & 1 & 1 \\
1 & 0 & 1 \\
1 & 1 & 0 \\
\end{array}

Here, with `0` we denote `False`, and with `1` we denote `True` in boolean terms. Interactive Demo 3.3: Solving XORHere we use a famous open-source visualization widget developed by the TensorFlow team, available [here](https://github.com/tensorflow/playground).* Play with the widget and observe that you cannot solve the continuous XOR dataset.* Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly.For the second part, you should set the weights by clicking on the connections and either typing the value or using the up and down keys to change it by one increment. You can do the same for the biases by clicking on the tiny square at each neuron's bottom left. Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is:

\begin{equation}
y = f(x_1)+f(x_2)-f(x_1+x_2)
\end{equation}

Try to set the weights and biases to implement this function after you have played enough :)
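A quick sketch verifying that formula on the four discrete XOR inputs (our own check, independent of the widget):

```python
def xor_relu(x1, x2):
    # y = f(x1) + f(x2) - f(x1 + x2), with f = ReLU
    f = torch.relu
    return f(x1) + f(x2) - f(x1 + x2)

for x1, x2 in [(0., 0.), (0., 1.), (1., 0.), (1., 1.)]:
    y = xor_relu(torch.tensor(x1), torch.tensor(x2))
    print(int(x1), int(x2), '->', int(y.item()))
# 0 0 -> 0, 0 1 -> 1, 1 0 -> 1, 1 1 -> 0
```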
###Code
# @markdown ###Play with the parameters to solve XOR
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor®Dataset=reg-plane&learningRate=0.03®ularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
# @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
###Output
_____no_output_____
###Markdown
--- Section 4: Ethics And Course Info
###Code
# @title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 17: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Meet our lecturers:Week 1: the building blocks* [Konrad Kording](https://kordinglab.com)* [Andrew Saxe](https://www.saxelab.org/)* [Surya Ganguli](https://ganguli-gang.stanford.edu/)* [Ioannis Mitliagkas](http://mitliagkas.github.io/)* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/)Week 2: making things work* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)* [Alexander Ecker](https://eckerlab.org/)* [James Evans](https://sociology.uchicago.edu/directory/james-evans)* [He He](https://hhexiy.github.io/)* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/)Week 3: more magic* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)* [Jane Wang](http://www.janexwang.com/) and [Feryal Behbahani](https://feryal.github.io/)* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lomonaco](https://www.vincenzolomonaco.com/)Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map? --- Submit to Airtable
###Code
# @title Video 18: Submission info
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This is Darryl, the Deep Learning Dapper Lion, and he's here to teach you about content submission to Airtable. At the end of each tutorial there will be an Airtable Submission Cell. Run the cell to generate the Airtable submission button and click on it to submit your information to Airtable. If it is the last tutorial of the day, the button will also take you to the end-of-day survey. It is critical that you push the submit button for every tutorial you run. Even if you don't finish the tutorial, still submit! Submitting is the only way we can verify that you attempted each tutorial, which is critical for the award of your completion certificate at the end of the course. Finally, we try to keep the Airtable code as hidden as possible, but if you ever see any calls to `atform` such as `atform.add_event()`, just know that it is for saving Airtable information only. It will not affect the code that is being run around it in any way, so please do not modify, comment out, or worry about any of those lines of code.
###Code
# @title Airtable Submission Link
from IPython import display
display.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link to survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
--- Bonus - 60 years of Machine Learning Research in one Plotby [Hendrik Strobelt](http://hendrik.strobelt.com) (MIT-IBM Watson AI Lab) with support from Benjamin Hoover.In this notebook we visualize a subset* of 3,300 articles retrieved from the AllenAI [S2ORC dataset](https://github.com/allenai/s2orc). We represent each paper by a position that is the output of a dimensionality reduction method applied to a vector representation of each paper. The vector representation is the output of a neural network.*The selection is heavily biased by the keywords and methodology we used to filter. Please see the details section to learn about what we did.
###Code
# @title Import `altair` and load the data
!pip install altair vega_datasets --quiet
import altair as alt # altair is defining data visualizations
# Source data files
# Position data file maps ID to x,y positions
POS_FILE = 'http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc.pos_umap_cosine_100_d0.1.json'
# Metadata file maps ID to title, abstract, author,....
META_FILE = 'http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc_clean.csv'
# data loading and wrangling
def load_data():
positions = pd.read_json(POS_FILE)
positions[['x', 'y']] = positions['pos'].to_list()
meta = pd.read_csv(META_FILE)
return positions.merge(meta, left_on='id', right_on='paper_id')
# load data
data = load_data()
# @title Define Visualization using ALtair
YEAR_PERIOD = "quinquennial" # @param
selection = alt.selection_multi(fields=[YEAR_PERIOD], bind='legend')
data[YEAR_PERIOD] = (data["year"] / 5.0).apply(np.floor) * 5
chart = alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count"]], width=800,
height=800).mark_circle(radius=2, opacity=0.2).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False, clamp=True, domain=list(range(1955,2020,5))),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
# size='citation_count',
# color="decade:O",
opacity=alt.condition(selection, alt.value(.8), alt.value(0.2)),
).add_selection(
selection
).interactive()
###Output
_____no_output_____
###Markdown
Let's look at the visualization. Each dot represents one paper. Close dots mean that the respective papers are more closely related than distant ones. The color indicates the 5-year period in which the paper was published. The dot size indicates the citation count (within the S2ORC corpus) as of July 2020. The view is **interactive** and allows for three main interactions. Try them and play around.1. hover over a dot to see a tooltip (title, author)2. select a year in the legend (right) to filter dots3. zoom in/out with scroll -- double click resets view
###Code
chart
###Output
_____no_output_____
###Markdown
QuestionsBy playing around, can you find some answers to the following questions?1. Can you find topical clusters? What cluster might occur because of a filtering error?2. Can you see a temporal trend in the data and clusters?3. Can you determine when deep learning methods started booming?4. Can you find the key papers that were written before the DL "winter" that define milestones for a cluster? (tip: look for large dots of a different color) MethodsHere is what we did:1. Filter all papers that fulfilled the criteria: - are categorized as `Computer Science` or `Mathematics` - one of the following keywords appearing in title or abstract: `"machine learning|artificial intelligence|neural network|(machine|computer) vision|perceptron|network architecture| RNN | CNN | LSTM | BLEU | MNIST | CIFAR |reinforcement learning|gradient descent| Imagenet "`2. Per year, remove all papers that are below the 99th percentile of citation count in that year3. Embed each paper using abstract+title in the SPECTER model4. Project based on the embedding using UMAP5. Visualize using Altair Find Authors
###Code
# @title Edit the `AUTHOR_FILTER` variable to full text search for authors.
AUTHOR_FILTER = "Rush " # @param space at the end means "word border"
### Don't ignore case when searching...
FLAGS = 0
### uncomment do ignore case
# FLAGS = re.IGNORECASE
## --- FILTER CODE.. make it your own ---
import re
data['issel'] = data['authors'].str.contains(AUTHOR_FILTER, na=False, flags=FLAGS, )
if data['issel'].mean()<0.0000000001:
print('No match found')
## --- FROM HERE ON VIS CODE ---
alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count", "issel"]], width=800,
height=800) \
.mark_circle(stroke="black", strokeOpacity=1).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.StrokeWidth('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[0, 2]), legend=None),
alt.Opacity('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[.2, 1]), legend=None),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
).interactive()
###Output
_____no_output_____
###Markdown
Tutorial 1: PyTorch**Week 1, Day 1: Basics and PyTorch****By Neuromatch Academy**__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording__Content reviewers:__ Deepak Raya, Siwei Bai, Kelson Shilling-Scrivo__Content editors:__ Anoop Kulkarni, Spiros Chavlis__Production editors:__ Arush Tagade, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesWe have a few specific objectives for this tutorial:* Learn about PyTorch and tensors* Tensor Manipulations* Data Loading* GPUs and Cuda Tensors* Train NaiveNet* Get to know your pod* Start thinking about the course as a whole
###Code
# @title Tutorial slides
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
These are the slides for all videos in this tutorial. If you want to download the slides locally, click [here](https://osf.io/wcjrv/download). --- Setup Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy), set global or environment variables, and load in helper functions for things like plotting. In some tutorials, you will notice that we install some dependencies even if they are preinstalled on Google Colab or Kaggle. This happens because we have added automation to our repository through [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions).Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.If you start building your own projects on this code base, we highly recommend looking at them in more detail.
###Code
# @title Install dependencies
!pip install pandas --quiet
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
# @title Figure Settings
import ipywidgets as widgets
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# @title Helper Functions
atform = AirtableForm('appn7VdPRseSoMXEG','W1D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/97e94a29-0b3a-4e16-9a8d-f6838a5bd83d')
def checkExercise1(A, B, C, D):
"""
Helper function for checking exercise.
Args:
A: torch.Tensor
B: torch.Tensor
C: torch.Tensor
D: torch.Tensor
Returns:
Nothing.
"""
errors = []
# TODO better errors and error handling
if not torch.equal(A.to(int),torch.ones(20, 21).to(int)):
errors.append(f"Got: {A} \n Expected: {torch.ones(20, 21)} (shape: {torch.ones(20, 21).shape})")
if not np.array_equal( B.numpy(),np.vander([1, 2, 3], 4)):
errors.append("B is not a tensor containing the elements of Z ")
if C.shape != (20, 21):
errors.append("C is not the correct shape ")
if not torch.equal(D, torch.arange(4, 41, step=2)):
errors.append("D does not contain the correct elements")
if errors == []:
print("All correct!")
else:
[print(e) for e in errors]
def timeFun(f, dim, iterations, device='cpu'):
iterations = iterations
t_total = 0
for _ in range(iterations):
start = time.time()
f(dim, device)
end = time.time()
t_total += end - start
if device == 'cpu':
print(f"time taken for {iterations} iterations of {f.__name__}({dim}, {device}): {t_total:.5f}")
else:
print(f"time taken for {iterations} iterations of {f.__name__}({dim}, {device}): {t_total:.5f}")
###Output
_____no_output_____
###Markdown
**Important note: Colab users***Scratch Code Cells*If you want to quickly try something out or take a look at the data, you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook.To open a new scratch cell, go to *Insert* → *Scratch code cell*. Section 1: Welcome to Neuromatch Deep learning course*Time estimate: ~25mins*
###Code
# @title Video 1: Welcome and History
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing
atform.add_event('Video 1: Welcome and History')
display(out)
###Output
_____no_output_____
###Markdown
This will be an intensive 3 week adventure. We will all learn Deep Learning (DL) in a group. Groups need standards. Read our [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing).
###Code
# @title Video 2: Why DL is cool
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 2: Why DL is cool')
display(out)
###Output
_____no_output_____
###Markdown
**Describe what you hope to get out of this course in about 100 words.** --- Section 2: The Basics of PyTorch*Time estimate: ~2 hours 05 mins* PyTorch is a Python-based scientific computing package targeted at two sets of audiences:- A replacement for NumPy to use the power of GPUs- A deep learning platform that provides significant flexibility and speed. At its core, PyTorch provides a few key features:- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.- An optimized **autograd** engine for automatically computing derivatives.- A clean, modular API for building and deploying **deep learning models**.You can find more information about PyTorch in the Appendix. Section 2.1: Creating Tensors
###Code
# @title Video 3: Making Tensors
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 3: Making Tensors')
display(out)
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so. **Construct tensors directly:**---
###Code
# we can construct a tensor directly from some common python iterables,
# such as lists and tuples; nested iterables can also be handled as long
# as the dimensions make sense
# tensor from a list
a = torch.tensor([0, 1, 2])
#tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
###Output
_____no_output_____
###Markdown
**Some common tensor constructors:**---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1, 5)
print(f"Tensor x: {x}")
print(f"Tensor y: {y}")
print(f"Tensor z: {z}")
###Output
_____no_output_____
###Markdown
Notice that `.empty()` does not return zeros, but seemingly random small numbers. Unlike `.zeros()`, which initialises the elements of the tensor with zeros, `.empty()` just allocates the memory. It is hence a bit faster if you are looking to just create a tensor. **Creating random tensors and tensors like other tensors:**---
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
print(f"Tensor d: {d}")
###Output
_____no_output_____
###Markdown
*Reproducibility*: - PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and GPU):```pythonimport torchtorch.manual_seed(0)```- For custom operators, you might need to set python seed as well:```pythonimport randomrandom.seed(0)```- Random number generators in other libraries (e.g., NumPy):```pythonimport numpy as npnp.random.seed(0)``` Here, we define for you a function called `set_seed` that does the job for you!
###Code
def set_seed(seed=None, seed_torch=True):
"""
Function that controls randomness. NumPy and random modules must be imported.
Args:
seed : Integer
A non-negative integer that defines the random state. Default is `None`.
seed_torch : Boolean
If `True` sets the random seed for pytorch tensors, so pytorch module
must be imported. Default is `True`.
Returns:
Nothing.
"""
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
###Output
_____no_output_____
###Markdown
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
###Code
def simplefun(seed=True, my_seed=None):
if seed:
set_seed(seed=my_seed)
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
print("Tensor a: ", a)
print("Tensor b: ", b)
simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed`
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**---The ```.arange()``` and ```.linspace()``` methods behave how you would expect them to if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Creating TensorsBelow you will find some incomplete code. Fill in the missing code to construct the specified tensors.We want the tensors: $A:$ 20 by 21 tensor consisting of ones$B:$ a tensor with elements equal to the elements of numpy array $Z$$C:$ a tensor with the same number of elements as $A$ but with values $\sim \mathcal{U}(0,1)^\dagger$$D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive.$^\dagger$: $\mathcal{U(\alpha, \beta)}$ denotes the [uniform distribution](https://en.wikipedia.org/wiki/Continuous_uniform_distribution) from $\alpha$ to $\beta$, with $\alpha, \beta \in \mathbb{R}$.
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## from the first expression
raise NotImplementedError("Student exercise: say what they should have done")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
```All correct!``` Section 2.2: Operations in PyTorch
###Code
# @title Video 4: Tensor Operators
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 4: Tensor Operators')
display(out)
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under `torch.`.
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
# this only works if c and d already exist
torch.add(a, b, out=c)
#Pointwise Multiplication of a and b
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overridden. The common standard arithmetic operators ($+$, $-$, $*$, $/$, and $**$) have all been lifted to elementwise operations.
###Code
x = torch.tensor([1, 2, 4, 8])
y = torch.tensor([1, 2, 3, 4])
x + y, x - y, x * y, x / y, x**y # The `**` is the exponentiation operator
###Output
_____no_output_____
###Markdown
**Tensor Methods** Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!) All of these operations should have similar syntax to their numpy equivalents (feel free to skip if you already know this!).
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
###Output
_____no_output_____
###Markdown
**Matrix Operations**The `@` symbol is overridden to represent matrix multiplication. You can also use `torch.matmul()` to multiply tensors. For dot products, you can use `torch.dot()`, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using `torch.t()` or `Tensor.T`. Note the lack of brackets for `Tensor.T` - it is an attribute, not a method. Coding Exercise 2.2 : Simple tensor operationsBelow are two expressions involving operations on matrices. $$ \textbf{A} = \begin{bmatrix}2 &4 \\5 & 7 \end{bmatrix} \begin{bmatrix} 1 &1 \\2 & 3\end{bmatrix} + \begin{bmatrix}10 & 10 \\ 12 & 1 \end{bmatrix} $$and$$ b = \begin{bmatrix} 3 \\ 5 \\ 7\end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8\end{bmatrix}$$The code block below, which computes these expressions using PyTorch, is incomplete - fill in the missing lines.
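Before the exercise, here is a small illustrative sketch (ours, not part of the original exercise) of the operators just described:
###Code
# Illustrative sketch: matrix multiplication, transposes, and dot products
M = torch.tensor([[1., 2.], [3., 4.]])
N = torch.tensor([[0., 1.], [1., 0.]])
print(M @ N)                  # matrix multiplication with the overridden @ operator
print(torch.matmul(M, N))     # the equivalent functional form
print(torch.t(M))             # transpose of a 2D tensor via torch.t()
print(M.T)                    # transpose via the .T attribute (no brackets!)
print(torch.dot(M[0], N[0]))  # dot product of two 1D tensors -> tensor(2.)
###Output
_____no_output_____
###Markdown
Now, on to the exercise: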
###Code
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
################################################
## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
################################################
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = ...
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# A = simple_operations(a1, a2, a3)
# print(A)
# to_remove solution
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = a1 @ a2 + a3
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
A = simple_operations(a1, a2, a3)
print(A)
###Output
_____no_output_____
###Markdown
```tensor([[20, 24], [31, 27]])```
###Code
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
###############################################
## TODO for students: complete the dot product computation using the argument tensors
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
# Use torch.dot() to compute the dot product of two tensors
product = ...
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# b = dot_product(b1, b2)
# print(b)
# to_remove solution
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
# Use torch.dot() to compute the dot product of two tensors
product = torch.dot(b1, b2)
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
b = dot_product(b1, b2)
print(b)
###Output
_____no_output_____
###Markdown
```tensor(82)``` Section 2.3: Manipulating Tensors in PyTorch
###Code
# @title Video 5: Tensor Indexing
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 5: Tensor Indexing')
display(out)
###Output
_____no_output_____
###Markdown
**Indexing**Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0 and ranges include the first element but exclude the last. We can access elements by their position relative to the end of the list using negative indices. Range-based indexing is also referred to as slicing. For example, `[-1]` selects the last element; `[1:3]` selects the second and the third elements, and `[:-2]` will select all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as in NumPy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
###Output
_____no_output_____
###Markdown
**Flatten and reshape**There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the `.flatten()` and `.reshape()` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")
# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")
# and back to 2D
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
###Output
_____no_output_____
###Markdown
You will also see the `.view()` methods used a lot to reshape tensors. There is a subtle difference between `.view()` and `.reshape()`, though for now we will just use `.reshape()`. The documentation can be found in the Appendix. **Squeezing tensors**When processing batches of data, you will quite often be left with singleton dimensions. E.g., `[1,10]` or `[256, 1, 3]`. This dimension can quite easily mess up your matrix operations if you don't plan on it being there...In order to compress tensors along their singleton dimensions we can use the `.squeeze()` method. We can use the `.unsqueeze()` method to do the opposite.
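A quick aside on the `.view()` vs `.reshape()` difference just mentioned (a minimal sketch of ours, not from the tutorial): `.view()` never copies data, so it only works when the requested shape is compatible with the tensor's memory layout, whereas `.reshape()` falls back to a copy when it has to.
###Code
# Sketch: .reshape() copies when needed; .view() refuses incompatible memory layouts
z = torch.arange(12).reshape(6, 2)
zt = z.t()             # transposing makes the layout non-contiguous
print(zt.reshape(12))  # works: reshape copies under the hood
try:
  zt.view(12)          # raises: view cannot reinterpret this layout without copying
except RuntimeError as e:
  print(f"view failed: {e}")
###Output
_____no_output_____
###Markdown
Back to squeezing out those singleton dimensions: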
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print(f"x[0]: {x[0]}")
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, `x[0]` gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")
# adding singleton dimensions works in a similar way, and is often used when
# tensors being added need the same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")
# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
###Output
_____no_output_____
###Markdown
**Permutation**Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim $[3\times48\times64]$, but our pipeline expects the colour dimension to be the last dimension, i.e., $[48\times64\times3]$. To get around this we can use the `.permute()` method.
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
###Output
_____no_output_____
###Markdown
You may also see `.transpose()` used. This works in a similar way as permute, but can only swap two dimensions at once. **Concatenation** In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor’s axis-0 length (`6`) is the sum of the two input tensors’ axis-0 lengths (`3+3`); while the second output tensor’s axis-1 length (`8`) is the sum of the two input tensors’ axis-1 lengths (`4+4`).
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
#concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by colums: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
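###Markdown
As mentioned above, `.transpose()` swaps exactly two dimensions at a time; a one-cell sketch (ours, for illustration):
###Code
# .transpose(dim0, dim1) swaps two dimensions; .permute() can reorder all of them
x = torch.rand(3, 48, 64)
print(x.transpose(0, 2).shape)   # swaps dims 0 and 2 -> torch.Size([64, 48, 3])
print(x.permute(2, 1, 0).shape)  # the same reordering expressed with permute
###Output
_____no_output_____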
###Markdown
**Conversion to Other Python Objects**Converting a tensor to a NumPy array, or vice versa, is easy. Note that `torch.tensor()` always copies the data, so the converted result does not share memory with its source; `Tensor.numpy()` on a CPU tensor, by contrast, returns an array that shares memory with the tensor, so in-place changes to one will show up in the other. When converting to a NumPy array, the information being tracked by the tensor will be lost, i.e., the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the item function or Python’s built-in functions.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
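# Note: int() truncates toward zero, so int(a) above yields 3 for a = 3.5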
###Output
_____no_output_____
###Markdown
Coding Exercise 2.3: Manipulating TensorsUsing a combination of the methods discussed above, complete the functions below. **Function A** This function takes in two 2D tensors $A$ and $B$ and returns the column sum of $A$ multiplied by the sum of all the elements of $B$ (a scalar), e.g.,:$ A = \begin{bmatrix}1 & 1 \\1 & 1 \end{bmatrix} \,$and$ B = \begin{bmatrix}1 & 2 & 3\\1 & 2 & 3 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix} 2 & 2 \\\end{bmatrix} \cdot 12 = \begin{bmatrix}24 & 24\\\end{bmatrix}$**Function B** This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.,:$ C = \begin{bmatrix}2 & 3 \\-1 & 10 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix}0 & 2 \\1 & 3 \\2 & -1 \\3 & 10\end{bmatrix}$**Hint:** Pay close attention to singleton dimensions.**Function C**This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $D$-shaped $E$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors, e.g.,:$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix} \,$and $ E = \begin{bmatrix}2 & 3 & 0 & 2 \\\end{bmatrix} \, $so$ \, Out = \begin{bmatrix}3 & 2 \\-1 & 5 \end{bmatrix}$$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$and$ \, E = \begin{bmatrix}2 & 3 & 0 \\\end{bmatrix} \,$so$ \, Out = \begin{bmatrix}1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix}$**Hint:** `torch.numel()` is an easy way of finding the number of elements in a tensor.
###Code
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
################################################
## TODO for students: complete functionA
raise NotImplementedError("Student exercise: complete function A")
################################################
# TODO: multiply the column sum of `my_tensor1` by the sum of `my_tensor2`
output = ...
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function B")
################################################
# TODO flatten the tensor `my_tensor`
my_tensor = ...
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionC
raise NotImplementedError("Student exercise: complete function C")
################################################
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if ...:
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
my_tensor1 = ...
my_tensor2 = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
# to_remove solution
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
# TODO: multiply the column sum of `my_tensor1` by the sum of `my_tensor2`
output = my_tensor1.sum(axis=0) * my_tensor2.sum()
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO flatten the tensor `my_tensor`
my_tensor = my_tensor.flatten()
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = torch.arange(0, len(my_tensor))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), my_tensor.unsqueeze(1)], axis=1)
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if torch.numel(my_tensor1) == torch.numel(my_tensor2):
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = my_tensor2.reshape(my_tensor1.shape)
# TODO sum the two tensors
output = my_tensor1 + my_tensor2
else:
# TODO flatten both tensors
my_tensor1 = my_tensor1.reshape(1, -1)
my_tensor2 = my_tensor2.reshape(1, -1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([my_tensor1, my_tensor2], axis=1).squeeze()
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
###Output
_____no_output_____
###Markdown
```tensor([24, 24])tensor([[ 0, 2], [ 1, 3], [ 2, -1], [ 3, 10]])tensor([[ 3, 2], [-1, 5]])tensor([ 1, -1, -1, 3, 2, 3, 0])``` Section 2.4: GPUs
###Code
# @title Video 6: GPU vs CPU
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 6: GPU vs CPU')
display(out)
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
When using Colab notebooks, you will not have access to a GPU by default. In order to start using GPUs we need to request one. We can do this by going to the runtime tab at the top of the page. By following *Runtime* → *Change runtime type* and selecting **GPU** from the *Hardware Accelerator* dropdown list, we can start playing with sending tensors to GPUs. Once you have done this, your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell. For more information on the GPU usage policy, see the Appendix. **Now we have a GPU.** The cell below should return `True`.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
[CUDA](https://developer.nvidia.com/cuda-toolkit) is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction, and allows us to launch CUDA kernels using pure Python. In short, we get the power of parallelizing our tensor computations on GPUs, whilst only writing (relatively) simple Python! Here, we define the function `set_device`, which returns the device used in the notebook, i.e., `cpu` or `cuda`. Unless otherwise specified, we use this function at the top of every tutorial, and we store the device in a variable like```pythonDEVICE = set_device()```Let's define the function using the PyTorch package `torch.cuda`, which is lazily initialized, so we can always import it and use `is_available()` to determine if our system supports CUDA.
###Code
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
###Output
_____no_output_____
###Markdown
Let's make some CUDA tensors!
###Code
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between cpu tensors and cuda tensors**Note that the type of the tensor changed after calling `.to()`. What happens if we try to perform operations on tensors that live on different devices?
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
# Uncomment the following line and run this cell
# z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine CUDA tensors and CPU tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the `.to()` method as before, or the `.cpu()` and `.cuda()` methods. Note that calling `.cuda()` will throw an error if CUDA is not enabled on your machine. Generally, in this course, all deep learning is done on the GPU, while general-purpose computation is done on the CPU, so sometimes we have to pass things back and forth; you'll see us do this below.
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)
# moving to cpu
x = x.to("cpu") # alternatively, you can use x = x.cpu()
print(x + y)
# moving to gpu
y = y.to(DEVICE) # alternatively, you can use y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.4: Just how much faster are GPUs?Below is a simple function `simpleFun`. Complete this function, such that it performs the operations:- elementwise multiplication- matrix multiplicationThe operations should be performed on either the CPU or GPU, as specified by the parameter `device`. We will use the helper function `timeFun(f, dim, iterations, device)`.
###Code
dim = 10000
iterations = 1
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
###############################################
## TODO for students: recreate the function, but
## ensure all computations happens on the `device`
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
###############################################
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
x = ...
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
y = ...
# 2D tensor filled with the scalar value 2, dim x dim
z = ...
# elementwise multiplication of x and y
a = ...
# matrix multiplication of x and y
b = ...
del x
del y
del z
del a
del b
## TODO: Implement the function above and uncomment the following lines to test your code
# timeFun(f=simpleFun, dim=dim, iterations=iterations)
# timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
# to_remove solution
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
x = torch.rand(dim, dim).to(device)
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
y = torch.rand_like(x).to(device)
# 2D tensor filled with the scalar value 2, dim x dim
z = 2*torch.ones(dim, dim).to(device)
# elementwise multiplication of x and y
a = x * y
# matrix multiplication of x and y
b = x @ z
del x
del y
del z
del a
del b
## TODO: Implement the function above and uncomment the following lines to test your code
timeFun(f=simpleFun, dim=dim, iterations=iterations)
timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
###Output
_____no_output_____
###Markdown
Sample output (depends on your hardware)```time taken for 1 iterations of simpleFun(10000, cpu): 23.74070time taken for 1 iterations of simpleFun(10000, cuda): 0.87535``` **Discuss!**Try reducing the dimensions of the tensors and increasing the iterations. You can get to a point where the CPU-only function is faster than the GPU function. Why might this be? Section 2.5: Datasets and Dataloaders
###Code
# @title Video 7: Getting Data
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 7: Getting Data')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
###Code
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
###Output
_____no_output_____
###Markdown
**Datasets**The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print(f"Number of samples: {len(cifar10_data)}")
print(f"Class names: {cifar10_data.classes}")
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
# Choose a random sample
random.seed(2021)
image, label = cifar10_data[random.randint(0, len(cifar10_data))]
print(f"Label: {cifar10_data.classes[label]}")
print(f"Image size: {image.shape}")
###Output
_____no_output_____
###Markdown
Color images are modeled as 3-dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimension is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W. Coding Exercise 2.5: Display an image from the datasetLet's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - $H \times W \times C$. You need to reorder the dimensions of the tensor using the `permute` method of the tensor. PyTorch `torch.permute(*dims)` rearranges the original tensor according to the desired ordering and returns a new tensor with its dimensions permuted. The number of elements in the returned tensor remains the same as in the original.**Code hint:**```python create a tensor of size 2 x 4input_var = torch.randn(2, 4) print its size and the tensorprint(input_var.size())print(input_var) dimensions permutedinput_var = input_var.permute(1, 0) print its size and the permuted tensorprint(input_var.size())print(input_var)```
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
# plt.show()
# to_remove solution
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1, 2, 0))
plt.show()
#@title Video 8: Train and Test
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 8: Train and Test')
display(out)
###Output
_____no_output_____
###Markdown
**Training and Test Datasets**When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, today we will not use both datasets separately, but this topic will be addressed in the next days.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
# @title Video 9: Data Augmentation - Transformations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 9: Data Augmentation - Transformations')
display(out)
###Output
_____no_output_____
###Markdown
**Dataloader**Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
*Reproducibility:* DataLoader will reseed workers following PyTorch's "Randomness in multi-process data loading" algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:```pythondef seed_worker(worker_id): worker_seed = torch.initial_seed() % 2**32 numpy.random.seed(worker_seed) random.seed(worker_seed)g_seed = torch.Generator()g_seed.manual_seed(my_seed)DataLoader( train_dataset, batch_size=batch_size, num_workers=num_workers, worker_init_fn=seed_worker, generator=g_seed )``` **Important:** For the `seed_worker` to have an effect, `num_workers` should be 2 or more. (A runnable sketch of this pattern appears after the next cell.) We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then we can query the next batch using the function `next`. We can now see that we have a 4D tensor. This is because we have 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
plt.show()
###Output
_____no_output_____
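###Markdown
As a side note, here is a minimal runnable sketch (ours) of the `seed_worker` pattern described above, reusing `training_data` from earlier; the seed value `0` is an arbitrary choice for this sketch:
###Code
# Sketch: a reproducible DataLoader following the pattern from the PyTorch docs
def seed_worker(worker_id):
  worker_seed = torch.initial_seed() % 2**32
  np.random.seed(worker_seed)
  random.seed(worker_seed)
g_seed = torch.Generator()
g_seed.manual_seed(0)  # arbitrary seed chosen for this sketch
reproducible_loader = DataLoader(training_data, batch_size=64, shuffle=True,
                                 num_workers=2, worker_init_fn=seed_worker,
                                 generator=g_seed)
###Output
_____no_output_____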
###Markdown
**Transformations**Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform. Check out the [pytorch documentation](https://pytorch.org/vision/stable/transforms.html) for details. Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale imagesThe goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility.
###Code
def my_data_load():
###############################################
## TODO for students: load the CIFAR10 data,
## but as grayscale images and not as RGB colored.
raise NotImplementedError("Student exercise: fill in the missing code to load the data")
###############################################
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(...,
transform=...)
# Display a random grayscale image
image, label = data[random.randint(0, len(data))]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# data = my_data_load()
# to_remove solution
def my_data_load():
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(root="data", download=True,
transform=Compose([ToTensor(), Grayscale()]))
# Display a random grayscale image
image, label = data[random.randint(0, len(data))]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
data = my_data_load()
###Output
_____no_output_____
###Markdown
--- Section 3: Neural Networks*Time estimate: ~1 hour 30 mins (excluding video)* Now it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:- Creating a simple neural network model- Training the network- Visualizing the results of the network- Tweaking the network
###Code
# @title Video 10: CSV Files
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 10: CSV Files')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Data LoadingFirst we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
# @title Generate sample data
# @markdown we use the `scikit-learn` module
from sklearn.datasets import make_moons
# Create a dataset of 256 points with a little noise
X, y = make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")
# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
plt.show()
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Initialize the device variable
DEVICE = set_device()
# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)
# Upload the tensor to the device
X = X.to(DEVICE)
print(f"Size X:{X.shape}")
# Convert the labels to a long integer tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(DEVICE)
print(f"Size y:{y.shape}")
###Output
_____no_output_____
###Markdown
Section 3.2: Create a Simple Neural Network
###Code
# @title Video 11: Generating the Neural Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 11: Generating the Neural Network')
display(out)
###Output
_____no_output_____
###Markdown
For this example we want to have a simple neural network consisting of 3 layers:- 1 input layer of size 2 (our points have 2 coordinates)- 1 hidden layer of size 16 (you can play with different numbers here)- 1 output layer of size 2 (we want to have the scores for the two classes)During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.**Programming the Network**PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:* `__init__` In the `__init__` method you need to define the structure of your network. Here you will specify what layers the network will consist of, what activation functions will be used, etc.* `forward` All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it.* `predict` This is not an obligatory method of a neural network module, but it is a good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score.* `train` This is also not an obligatory method, but it is a good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook.**Note:** You can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`.
###Code
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
nn.ReLU(),  # The ReLU activation is a widely used non-linearity that is cheap to compute:
            # it returns 0 for any negative input and returns any positive value x unchanged.
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Choose the most likely label predicted by the network
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
def train(self, X, y):
pass
###Output
_____no_output_____
###Markdown
**Check that your network works**Create an instance of your model and visualize it.
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(DEVICE)
# Print the structure of the network
print(model)
###Output
_____no_output_____
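###Markdown
As noted above, calling a module directly invokes `forward`; a quick check (ours, for illustration):
###Code
# net(x) is equivalent to net.forward(x) for nn.Module subclasses
dummy = torch.zeros(1, 2, device=DEVICE)  # a single 2D point
print(torch.equal(model(dummy), model.forward(dummy)))  # prints True
###Output
_____no_output_____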
###Markdown
Coding Exercise 3.2: Classify some samplesNow let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. The goal here is just to get some experience with the data structures that are passed to the forward and predict methods and their results.
###Code
## Get the samples
# X_samples = ...
# print("Sample input:\n", X_samples)
## Do a forward pass of the network
# output = ...
# print("\nNetwork output:\n", output)
## Predict the label of each point
# y_predicted = ...
# print("\nPredicted labels:\n", y_predicted)
# to_remove solution
## Get the samples
X_samples = X[0:5]
print("Sample input:\n", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("\nNetwork output:\n", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("\nPredicted labels:\n", y_predicted)
###Output
_____no_output_____
###Markdown
```Sample input: tensor([[ 0.9066, 0.5052], [-0.2024, 1.1226], [ 1.0685, 0.2809], [ 0.6720, 0.5097], [ 0.8548, 0.5122]], device='cuda:0')Network output: tensor([[ 0.1543, -0.8018], [ 2.2077, -2.9859], [-0.5745, -0.0195], [ 0.1924, -0.8367], [ 0.1818, -0.8301]], device='cuda:0', grad_fn=<AddmmBackward>)Predicted labels: tensor([0, 0, 1, 0, 0], device='cuda:0')``` Section 3.3: Train Your Neural Network
###Code
# @title Video 12: Train the Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 12: Train the Network')
display(out)
###Output
_____no_output_____
###Markdown
Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet - we will cover training in much more detail in the next days. For now, the goal is just to see your network in action!You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we will implement it as a function outside of the class in order to have it in a separate cell.
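For reference, here is a minimal, hedged sketch of how the full-dataset loop below could be adapted to mini-batches with a `DataLoader` (the batch size, learning rate, and epoch count are illustrative choices, not values prescribed by this tutorial):

```python
import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader

def train_minibatch(model, X, y, batch_size=32, epochs=100):
    # wrap the tensors in a dataset and iterate over shuffled mini-batches
    loader = DataLoader(TensorDataset(X, y), batch_size=batch_size, shuffle=True)
    loss_function = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
    for _ in range(epochs):
        for X_batch, y_batch in loader:
            optimizer.zero_grad()                          # clear old gradients
            loss = loss_function(model(X_batch), y_batch)  # forward pass + loss
            loss.backward()                                # backpropagate
            optimizer.step()                               # update the weights
    return model
```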
###Code
# @title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
# We'll use the whole dataset during the training instead of using batches
# in order to keep the code simple for now.
y_logits = model.forward(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(model, X, y, DEVICE)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Create a new network instance and train it
model = NaiveNet().to(DEVICE)
losses = train(model, X, y)
###Output
_____no_output_____
###Markdown
**Plot the loss during training**Plot the loss over the course of training to see how it decreases and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
# @title Visualize the training process
# @markdown Execute this cell!
!pip install imageio --quiet
# note: pathlib is part of the Python 3 standard library, so it does not need to be installed
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(10):
filename = "frames/0"+str(i)+"000.png"
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
# @title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 13: Play with it')
display(out)
###Output
_____no_output_____
###Markdown
Exercise 3.3: Tweak your NetworkYou can now play around with the network a little bit to get a feeling for what the different parameters are doing. Here are some ideas for what you could try:- Increase or decrease the number of epochs for training- Increase or decrease the size of the hidden layer- Add one additional hidden layer (a sketch follows below)Can you get the network to better fit the data?
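As a starting point for the last idea, here is a minimal sketch of a `NaiveNet` variant with one extra hidden layer (the width of 16 is an illustrative choice, not a prescribed value):

```python
import torch
from torch import nn

class DeeperNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(2, 16),
            nn.ReLU(),
            nn.Linear(16, 16),  # the additional hidden layer
            nn.ReLU(),
            nn.Linear(16, 2),
        )

    def forward(self, x):
        return self.layers(x)
```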
###Code
# @title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 14: XOR Widget')
display(out)
###Output
_____no_output_____
###Markdown
The exclusive OR (XOR) logical operation gives a true (`1`) output when the number of true inputs is odd. That is, a true output results if one, and only one, of the inputs to the gate is true; if both inputs are false (`0`) or both are true, a false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false.In case of two inputs ($X$ and $Y$) the following truth table is applied:\begin{matrix} X & Y & \text{XOR}\\ \hline 0 & 0 & 0\\ 0 & 1 & 1\\ 1 & 0 & 1\\ 1 & 1 & 0\end{matrix}Here, with `0`, we denote `False`, and with `1` we denote `True` in boolean terms. Interactive Demo 3.3: Solving XORHere we use a famous open-source visualization widget developed by the TensorFlow team, available [here](https://github.com/tensorflow/playground).* Play with the widget and observe that you cannot solve the continuous XOR dataset.* Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly.For the second part, you should set the weights by clicking on the connections and either typing the value or using the up and down keys to change it by one increment. You can also do the same for the biases by clicking on the tiny square at each neuron's bottom left.Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is: \begin{equation} y = f(x_1)+f(x_2)-f(x_1+x_2)\end{equation}Try to set the weights and biases to implement this function after you have played around enough :)
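As a quick sanity check of this identity, the snippet below (a minimal sketch; encoding `False` as `-1` and `True` as `+1` is an illustrative choice matching the four quadrants of the continuous XOR dataset) shows that $y$ is positive exactly when the two inputs' signs differ:

```python
import torch

f = torch.relu

# y = f(x1) + f(x2) - f(x1 + x2) reproduces the XOR pattern:
# zero when the signs agree, positive when they differ
for x1 in (-1.0, 1.0):
    for x2 in (-1.0, 1.0):
        y = f(torch.tensor(x1)) + f(torch.tensor(x2)) - f(torch.tensor(x1 + x2))
        print(f"x1={x1:+.0f}, x2={x2:+.0f} -> y={y.item():.0f}")
# x1=-1, x2=-1 -> y=0
# x1=-1, x2=+1 -> y=1
# x1=+1, x2=-1 -> y=1
# x1=+1, x2=+1 -> y=0
```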
###Code
# @markdown Play with the parameters to solve XOR
from IPython.display import IFrame
IFrame("https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor®Dataset=reg-plane&learningRate=0.03®ularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false", width=1020, height=660)
# @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
###Output
_____no_output_____
###Markdown
--- Section 4: Ethics And Course Info
###Code
# @title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 17: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Meet our lecturers Week 1: the building blocks* [Konrad Kording](https://kordinglab.com)* [Andrew Saxe](https://www.saxelab.org/)* [Surya Ganguli](https://ganguli-gang.stanford.edu/)* [Ioannis Mitliagkas](http://mitliagkas.github.io/)* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/) Week 2: making things work* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)* [Alexander Ecker](https://eckerlab.org/)* [James Evans](https://sociology.uchicago.edu/directory/james-evans)* [He He](https://hhexiy.github.io/)* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/) Week 3: more magic* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)* [Jane Wang](http://www.janexwang.com/) and [Feryal Behbahani](https://feryal.github.io/)* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lomonaco](https://www.vincenzolomonaco.com/) Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map? --- Submit to Airtable
###Code
# @title Video 18: Submission info
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This is Darryl, the Deep Learning Dapper Lion, and he's here to teach you about content submission to airtable. At the end of each tutorial there will be an Airtable Submission Cell. Run the cell to generate the airtable submission button and click on it to submit your information to airtable. If it is the last tutorial of the day, your button will look like this and take you to the end-of-day survey: otherwise it will look like this: It is critical that you push the submit button for every tutorial you run. **Even if you don't finish the tutorial, still submit!**Submitting is the only way we can verify that you attempted each tutorial, which is critical for us to be able to track your progress. TL;DR: Basic tutorial workflow1. Work through the tutorial, answering **Think!** questions and **Coding Exercises**.2. At the end of each tutorial (even if it is incomplete), run the airtable submission code cell.3. Push the *Submission* button.4. If it is the last tutorial of the day, the *Submission* button will also take you to the end-of-day survey on a new page. Complete that and submit it. Submission FAQs: 1. What if I want to change my answers to previous discussion questions? > You are free to change and resubmit any of the answers and Think! questions as many times as you like. However, **please only run the airtable submission code and click on the link once you are ready to submit**.2. Okay, but what if I submitted my airtable anyway and really want to resubmit?> After making changes, you can re-run the airtable submission code cell. This will result in a second submission from you for the data. This will make Darryl sad, as it will be more work for him to clean up the data later. 3. HELP! I accidentally ran the code to generate the airtable submission button before I was ready to submit! What do I do?> If you run the code to generate the link, anything that happens afterwards will not be captured. Complete the tutorial and make sure to re-run the airtable submission again when you are finished, before pressing the submission button. 4. What if I want to work on this on my own later, should I wait to submit until I'm finished?> Please submit wherever you are at the end of the day. It's great that you want to keep working on this, but it's important to see the places where we tried things that didn't quite work out, so we can fix them for next year. Finally, we try to keep the airtable code as hidden as possible, but if you ever see any calls to `atform` such as `atform.add_event()` in the coding exercises, just know that is for saving airtable information only. **It will not affect the code that is being run around it in any way**, so please do not modify, comment out, or worry about any of those lines of code.Now, let's try submitting today's course to Airtable by running the next cell and clicking the button when it appears.
###Code
# @title Airtable Submission Link
from IPython import display as IPyDisplay
IPyDisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link to survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
--- Bonus - 60 years of Machine Learning Research in one PlotBy [Hendrik Strobelt](http://hendrik.strobelt.com) (MIT-IBM Watson AI Lab) with support from Benjamin Hoover.In this notebook we visualize a subset* of 3,300 articles retrieved from the AllenAI [S2ORC dataset](https://github.com/allenai/s2orc). We represent each paper by a position that is the output of a dimensionality reduction method applied to a vector representation of each paper. The vector representation is the output of a neural network.**Note:** The selection is very biased by the keywords and methodology we used to filter. Please see the details section to learn about what we did.
###Code
# @title Import `altair` and load the data
!pip install altair vega_datasets --quiet
import requests
import altair as alt  # altair is a declarative data visualization library
# Source data files
# Position data file maps ID to x,y positions
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc.pos_umap_cosine_100_d0.1.json
POS_FILE = 'https://osf.io/qyrfn/download'
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc_clean.csv
# Metadata file maps ID to title, abstract, author,....
META_FILE = 'https://osf.io/vfdu6/download'
# data loading and wrangling
def load_data():
positions = pd.read_json(POS_FILE)
positions[['x', 'y']] = positions['pos'].to_list()
meta = pd.read_csv(META_FILE)
return positions.merge(meta, left_on='id', right_on='paper_id')
# load data
data = load_data()
# @title Define visualization using Altair
YEAR_PERIOD = "quinquennial" # @param
selection = alt.selection_multi(fields=[YEAR_PERIOD], bind='legend')
data[YEAR_PERIOD] = (data["year"] / 5.0).apply(np.floor) * 5
chart = alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count"]], width=800,
height=800).mark_circle(radius=2, opacity=0.2).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False, clamp=True, domain=list(range(1955,2020,5))),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
# size='citation_count',
# color="decade:O",
opacity=alt.condition(selection, alt.value(.8), alt.value(0.2)),
).add_selection(
selection
).interactive()
###Output
_____no_output_____
###Markdown
Let's look at the visualization. Each dot represents one paper. Close dots mean that the respective papers are more closely related than distant ones. The color indicates the 5-year period in which the paper was published. The dot size indicates the citation count (within the S2ORC corpus) as of July 2020. The view is **interactive** and allows for three main interactions. Try them and play around:1. Hover over a dot to see a tooltip (title, author)2. Select a year in the legend (right) to filter dots3. Zoom in/out with scroll -- double click resets view
###Code
chart
###Output
_____no_output_____
###Markdown
QuestionsBy playing around, can you find some answers to the following questions?1. Can you find topical clusters? What cluster might occur because of a filtering error?2. Can you see a temporal trend in the data and clusters?3. Can you determine when deep learning methods started booming?4. Can you find the key papers that were written before the DL "winter" that define milestones for a cluster? (tip: look for large dots of a different color) MethodsHere is what we did:1. Filtering of all papers that fulfilled the criteria: - are categorized as `Computer Science` or `Mathematics` - one of the following keywords appearing in title or abstract: `"machine learning|artificial intelligence|neural network|(machine|computer) vision|perceptron|network architecture| RNN | CNN | LSTM | BLEU | MNIST | CIFAR |reinforcement learning|gradient descent| Imagenet "`2. Per year, remove all papers that are below the 99th percentile of citation count in that year3. Embed each paper by using abstract+title in the SPECTER model4. Project based on embedding using UMAP5. Visualize using Altair Find Authors
###Code
# @title Edit the `AUTHOR_FILTER` variable to full text search for authors.
AUTHOR_FILTER = "Rush " # @param space at the end means "word border"
### Don't ignore case when searching...
FLAGS = 0
### uncomment to ignore case
# FLAGS = re.IGNORECASE
## --- FILTER CODE.. make it your own ---
import re
data['issel'] = data['authors'].str.contains(AUTHOR_FILTER, na=False, flags=FLAGS, )
if data['issel'].mean()<0.0000000001:
print('No match found')
## --- FROM HERE ON VIS CODE ---
alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count", "issel"]], width=800,
height=800) \
.mark_circle(stroke="black", strokeOpacity=1).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.StrokeWidth('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[0, 2]), legend=None),
alt.Opacity('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[.2, 1]), legend=None),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
).interactive()
###Output
_____no_output_____
###Markdown
Tutorial 1: PyTorch**Week 1, Day 1: Basics and PyTorch****By Neuromatch Academy**__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording__Content reviewers:__ Deepak Raya, Siwei Bai, Kelson Shilling-Scrivo__Content editors:__ Anoop Kulkarni, Spiros Chavlis__Production editors:__ Arush Tagade, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesWe have a few specific objectives for this tutorial:* Learn about PyTorch and tensors* Tensor Manipulations* Data Loading* GPUs and Cuda Tensors* Train NaiveNet* Get to know your pod* Start thinking about the course as a whole
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial today
# @markdown If you want to locally download the slides, click [here](https://osf.io/wcjrv/download)
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- Setup Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy); set global or environment variables, and load in helper functions for things like plotting. In some tutorials, you will notice that we install some dependencies even if they are preinstalled on google colab or kaggle. This happens because we have added automation to our repository through [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions).Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.If you start building your own projects on this code base, we highly recommend looking at these cells in more detail.
###Code
# @title Install dependencies
!pip install pandas --quiet
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
# @title Figure Settings
import ipywidgets as widgets
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# @title Helper Functions
atform = AirtableForm('appn7VdPRseSoMXEG','W1D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/97e94a29-0b3a-4e16-9a8d-f6838a5bd83d')
def checkExercise1(A, B, C, D):
"""
Helper function for checking exercise.
Args:
A: torch.Tensor
B: torch.Tensor
C: torch.Tensor
D: torch.Tensor
Returns:
Nothing.
"""
errors = []
# TODO better errors and error handling
if not torch.equal(A.to(int),torch.ones(20, 21).to(int)):
errors.append(f"Got: {A} \n Expected: {torch.ones(20, 21)} (shape: {torch.ones(20, 21).shape})")
if not np.array_equal( B.numpy(),np.vander([1, 2, 3], 4)):
errors.append("B is not a tensor containing the elements of Z ")
if C.shape != (20, 21):
errors.append("C is not the correct shape ")
if not torch.equal(D, torch.arange(4, 41, step=2)):
errors.append("D does not contain the correct elements")
if errors == []:
print("All correct!")
else:
[print(e) for e in errors]
def timeFun(f, dim, iterations, device='cpu'):
t_total = 0
for _ in range(iterations):
start = time.time()
f(dim, device)
end = time.time()
t_total += end - start
print(f"time taken for {iterations} iterations of {f.__name__}({dim}, {device}): {t_total:.5f}")
###Output
_____no_output_____
###Markdown
**Important note: Google Colab users***Scratch Code Cells*If you want to quickly try out something or take a look at the data you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook.To open a new scratch cell go to *Insert* → *Scratch code cell*. Section 1: Welcome to Neuromatch Deep learning course*Time estimate: ~25mins*
###Code
# @title Video 1: Welcome and History
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing
atform.add_event('Video 1: Welcome and History')
display(out)
###Output
_____no_output_____
###Markdown
This will be an intensive 3-week adventure. We will all learn Deep Learning. In a group. Groups need standards. Read our [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing).
###Code
# @title Video 2: Why DL is cool
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 2: Why DL is cool')
display(out)
###Output
_____no_output_____
###Markdown
**Describe what you hope to get out of this course in about 100 words.** --- Section 2: The Basics of PyTorch*Time estimate: ~2 hours 05 mins* PyTorch is a Python-based scientific computing package targeted at two sets of audiences:- A replacement for NumPy to use the power of GPUs- A deep learning platform that provides significant flexibility and speedAt its core, PyTorch provides a few key features:- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to a [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.- An optimized **autograd** engine for automatically computing derivatives.- A clean, modular API for building and deploying **deep learning models**.You can find more information about PyTorch in the appendix. Section 2.1: Creating Tensors
###Code
# @title Video 3: Making Tensors
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 3: Making Tensors')
display(out)
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so. **Construct tensors directly:**---
###Code
# we can construct a tensor directly from some common python iterables,
# such as lists and tuples; nested iterables can also be handled, as long
# as the dimensions make sense
# tensor from a list
a = torch.tensor([0, 1, 2])
#tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
###Output
_____no_output_____
###Markdown
**Some common tensor constructors:**---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1, 5)
print(f"Tensor x: {x}")
print(f"Tensor y: {y}")
print(f"Tensor z: {z}")
###Output
_____no_output_____
###Markdown
Notice that ```.empty()``` does not return zeros, but seemingly random small numbers. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory without initializing it, so the values are whatever happened to be in that chunk of memory. It is hence a bit faster when you only need to allocate a tensor. **Creating random tensors and tensors like other tensors:**---
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
print(f"Tensor d: {d}")
###Output
_____no_output_____
###Markdown
*Reproducibility*: - PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA)```pythonimport torchtorch.manual_seed(0)```- For custom operators, you might need to set python seed as well:```pythonimport randomrandom.seed(0)```- Random number generators in other libraries```pythonimport numpy as npnp.random.seed(0)``` Here, we define for you a function called `set_seed` that does the job for you!
###Code
def set_seed(seed=None, seed_torch=True):
"""
Function that controls randomness. NumPy and random modules must be imported.
Args:
seed : Integer
A non-negative integer that defines the random state. Default is `None`.
seed_torch : Boolean
If `True` sets the random seed for pytorch tensors, so pytorch module
must be imported. Default is `True`.
Returns:
Nothing.
"""
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
###Output
_____no_output_____
###Markdown
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
###Code
def simplefun(seed=True, my_seed=None):
if seed:
set_seed(seed=my_seed)
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
print("Tensor a: ", a)
print("Tensor b: ", b)
simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed`
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**---The ```.arange()``` and ```.linspace()``` behave how you would expect them to if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Creating TensorsBelow you will find some incomplete code. Fill in the missing code to construct the specified tensors.We want the tensors: $A:$ 20 by 21 tensor consisting of ones$B:$ a tensor with elements equal to the elements of numpy array $Z$$C:$ a tensor with the same number of elements as $A$ but with values $\sim U(0,1)$$D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive.
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code below
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
```All correct!``` Section 2.2: Operations in PyTorch
###Code
# @title Video 4: Tensor Operators
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 4: Tensor Operators')
display(out)
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
# this only works if c and d already exist
torch.add(a, b, out=c)
# Pointwise multiplication of a and b
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overridden. The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations.
###Code
x = torch.tensor([1, 2, 4, 8])
y = torch.tensor([1, 2, 3, 4])
x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation
###Output
_____no_output_____
###Markdown
**Tensor Methods** Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!) All of these operations should have similar syntax to their numpy equivalents.(Feel free to skip if you already know this!)
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
###Output
_____no_output_____
###Markdown
**Matrix Operations**The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot products, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.T```. Note the lack of brackets for ```Tensor.T``` - it is an attribute, not a method. Coding Exercise 2.2 : Simple tensor operationsBelow are two expressions involving operations on matrices. $$ \textbf{A} = \begin{bmatrix}2 &4 \\5 & 7 \end{bmatrix} \begin{bmatrix} 1 &1 \\2 & 3\end{bmatrix} + \begin{bmatrix}10 & 10 \\ 12 & 1 \end{bmatrix} $$and$$ b = \begin{bmatrix} 3 \\ 5 \\ 7\end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8\end{bmatrix}$$The code block below computes these expressions using PyTorch, but it is incomplete - fill in the missing lines.
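Before diving into the exercise, here is a quick, minimal demonstration of the operators described above (the tensor values are arbitrary examples):

```python
import torch

M = torch.tensor([[1., 2.], [3., 4.]])
N = torch.tensor([[0., 1.], [1., 0.]])
v = torch.tensor([1., 2.])
w = torch.tensor([3., 4.])

print(M @ N)               # matrix multiplication via the @ operator
print(torch.matmul(M, N))  # equivalent to M @ N
print(torch.dot(v, w))     # dot product of two 1D tensors -> tensor(11.)
print(M.T)                 # transpose as an attribute
print(torch.t(M))          # transpose as a function
```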
###Code
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
################################################
## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
################################################
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = ...
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# A = simple_operations(a1, a2, a3)
# print(A)
# to_remove solution
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = a1 @ a2 + a3
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
A = simple_operations(a1, a2, a3)
print(A)
###Output
_____no_output_____
###Markdown
```tensor([[20, 24], [31, 27]])```
###Code
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
###############################################
## TODO for students: complete the computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
# Use torch.dot() to compute the dot product of two tensors
product = ...
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# b = dot_product(b1, b2)
# print(b)
# to_remove solution
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
# Use torch.dot() to compute the dot product of two tensors
product = torch.dot(b1, b2)
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
b = dot_product(b1, b2)
print(b)
###Output
_____no_output_____
###Markdown
```tensor(82)``` Section 2.3: Manipulating Tensors in PyTorch
###Code
# @title Video 5: Tensor Indexing
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 5: Tensor Indexing')
display(out)
###Output
_____no_output_____
###Markdown
**Indexing**Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0, and ranges are specified to include the first element but exclude the last. We can access elements according to their relative position to the end of the list by using negative indices. Indexing with ranges is also referred to as slicing.For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as numpy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
###Output
_____no_output_____
###Markdown
**Flatten and reshape**There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")
# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")
# and back to 2D
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
###Output
_____no_output_____
###Markdown
You will also see the ```.view()``` methods used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()```. The documentation can be found in the appendix. **Squeezing tensors**When processing batches of data, you will quite often be left with singleton dimensions. e.g. [1,10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there...In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print(f"x[0]: {x[0]}")
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, x[0] gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")
# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")
# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
###Output
_____no_output_____
###Markdown
**Permutation**Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension i.e. [48x64x3]. To get around this we can use ```.permute()```
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
###Output
_____no_output_____
###Markdown
You may also see ```.transpose()``` used. This works in a similar way to permute, but can only swap two dimensions at once; a short illustration follows below. **Concatenation** In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor's axis-0 length (6) is the sum of the two input tensors' axis-0 lengths (3+3), while the second output tensor's axis-1 length (8) is the sum of the two input tensors' axis-1 lengths (4+4).
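First, the promised quick illustration of `.transpose()` (the tensor shape is an arbitrary example); the concatenation code follows after it:

```python
import torch

x = torch.rand(3, 48, 64)
# swap dimensions 0 and 2 only; the middle dimension stays in place
print(x.transpose(0, 2).shape)  # torch.Size([64, 48, 3])
```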
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
#concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by columns: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
###Markdown
**Conversion to Other Python Objects**Converting a tensor to a NumPy array, or vice versa, is easy. Note that for CPU tensors, `Tensor.numpy()` and `torch.from_numpy()` *share* the underlying memory, whereas `torch.tensor()` (used below) *copies* the data. Copying avoids surprising interactions when PyTorch and NumPy operate on the same chunk of memory.When converting to a numpy array, the information being tracked by the tensor will be lost, i.e., the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
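A minimal sketch demonstrating the sharing-vs-copying behaviour described above (CPU tensors only):

```python
import numpy as np
import torch

arr = np.zeros(3)
shared = torch.from_numpy(arr)  # shares memory with arr
copied = torch.tensor(arr)      # makes an independent copy

arr[0] = 42.0                   # mutate the original numpy array
print(shared)  # tensor([42.,  0.,  0.], dtype=torch.float64) -- change is visible
print(copied)  # tensor([0., 0., 0.], dtype=torch.float64)    -- unaffected
```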
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the item function or Python’s built-in functions.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.3: Manipulating TensorsUsing a combination of the methods discussed above, complete the functions below. **Function A** This function takes in two 2D tensors $A$ and $B$ and returns the column sum of $A$ multiplied by the sum of all the elements of $B$, i.e., a scalar, e.g.:$ A = \begin{bmatrix}1 & 1 \\1 & 1 \end{bmatrix} \,$and$ B = \begin{bmatrix}1 & 2 & 3\\1 & 2 & 3 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix} 2 & 2 \\\end{bmatrix} \cdot 12 = \begin{bmatrix}24 & 24\\\end{bmatrix}$**Function B** This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.,:$ C = \begin{bmatrix}2 & 3 \\-1 & 10 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix}0 & 2 \\1 & 3 \\2 & -1 \\3 & 10\end{bmatrix}$**Hint:** pay close attention to singleton dimensions**Function C**This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $D$-shaped $E$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors, e.g.,:$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix} \,$and $ E = \begin{bmatrix}2 & 3 & 0 & 2 \\\end{bmatrix} \, $so$ \, Out = \begin{bmatrix}3 & 2 \\-1 & 5 \end{bmatrix}$$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$and$ \, E = \begin{bmatrix}2 & 3 & 0 \\\end{bmatrix} \,$so$ \, Out = \begin{bmatrix}1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix}$**Hint:** `torch.numel()` is an easy way of finding the number of elements in a tensor
###Code
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
################################################
## TODO for students: complete functionA
raise NotImplementedError("Student exercise: complete function A")
################################################
# TODO multiplication the sum of the tensors
output = ...
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function B")
################################################
# TODO flatten the tensor `my_tensor`
my_tensor = ...
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionC
raise NotImplementedError("Student exercise: complete function C")
################################################
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if ...:
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
my_tensor1 = ...
my_tensor2 = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
# to_remove solution
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
# TODO multiplication the sum of the tensors
output = my_tensor1.sum(axis=0) * my_tensor2.sum()
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO flatten the tensor `my_tensor`
my_tensor = my_tensor.flatten()
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = torch.arange(0, len(my_tensor))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), my_tensor.unsqueeze(1)], axis=1)
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if torch.numel(my_tensor1) == torch.numel(my_tensor2):
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = my_tensor2.reshape(my_tensor1.shape)
# TODO sum the two tensors
output = my_tensor1 + my_tensor2
else:
# TODO flatten both tensors
my_tensor1 = my_tensor1.reshape(1, -1)
my_tensor2 = my_tensor2.reshape(1, -1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([my_tensor1, my_tensor2], axis=1).squeeze()
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
###Output
_____no_output_____
###Markdown
```tensor([24, 24])tensor([[ 0, 2], [ 1, 3], [ 2, -1], [ 3, 10]])tensor([[ 3, 2], [-1, 5]])tensor([ 1, -1, -1, 3, 2, 3, 0])``` Section 2.4: GPUs
###Code
# @title Video 6: GPU vs CPU
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 6: GPU vs CPU')
display(out)
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
When using Colab notebooks, you will not have access to a GPU by default. In order to start using GPUs we need to request one: go to Runtime -> Change runtime type and select "GPU" from the Hardware Accelerator dropdown list. Once you have done this, your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell. (For more information on the GPU usage policy, see the appendix.)

**Now we have a GPU**

The cell below should return True.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction, and allows us to launch CUDA kernels using pure Python. In short, we get the power of parallelising our tensor computations on GPUs, whilst only writing (relatively) simple Python!

Here, we define the function `set_device`, which returns the device used in the notebook, i.e., `cpu` or `cuda`. Unless otherwise specified, we use this function at the top of every tutorial, and we store the device in a variable:

```python
DEVICE = set_device()
```

Let's define the function using the PyTorch package `torch.cuda`, which is lazily initialized, so we can always import it, and use `is_available()` to determine if our system supports CUDA.
###Code
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
###Output
_____no_output_____
###Markdown
Let's make some CUDA tensors!
###Code
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between cpu tensors and cuda tensors**

Note that the type of the tensor changed after calling `.to()`. What happens if we try to perform operations on tensors living on different devices?
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
# Uncomment the following line and run this cell
# z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the `.to()` method as before, or the `.cpu()` and `.cuda()` methods. Note that using `.cuda()` will throw an error if CUDA is not enabled on your machine.

Generally in this course all deep learning is done on the GPU and any other computation is done on the CPU, so sometimes we have to pass things back and forth, as in the calls below.
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)
# moving to cpu
x = x.to("cpu") # alternatively, you can use x = x.cpu()
print(x + y)
# moving to gpu
y = y.to(DEVICE) # alternatively, you can use y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.4: Just how much faster are GPUs?

Below is a simple function `simpleFun`. Complete this function, such that it performs the operations:
- elementwise multiplication
- matrix multiplication

It should be possible to perform the operations on either the CPU or the GPU, as specified by the parameter `device`. We will use the helper function `timeFun(f, dim, iterations, device)`.
###Code
dim = 10000
iterations = 1
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
###############################################
## TODO for students: recreate the function, but
## ensure all computations happens on the `device`
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
###############################################
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
x = ...
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
y = ...
# 2D tensor filled with the scalar value 2, dim x dim
z = ...
# elementwise multiplication of x and y
a = ...
  # matrix multiplication of x and z
b = ...
del x
del y
del z
del a
del b
## TODO: Implement the function above and uncomment the following lines to test your code
# timeFun(f=simpleFun, dim=dim, iterations=iterations)
# timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
# to_remove solution
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
x = torch.rand(dim, dim).to(device)
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
y = torch.rand_like(x).to(device)
# 2D tensor filled with the scalar value 2, dim x dim
z = 2*torch.ones(dim, dim).to(device)
# elementwise multiplication of x and y
a = x * y
  # matrix multiplication of x and z
b = x @ z
del x
del y
del z
del a
del b
## TODO: Implement the function above and uncomment the following lines to test your code
timeFun(f=simpleFun, dim=dim, iterations=iterations)
timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
###Output
_____no_output_____
###Markdown
Sample output (depends on your hardware)

```
time taken for 1 iterations of simpleFun(10000, cpu): 23.74070
time taken for 1 iterations of simpleFun(10000, cuda): 0.87535
```

**Discuss!**

Try and reduce the dimensions of the tensors and increase the iterations. You can get to a point where the CPU-only function is faster than the GPU function. Why might this be? The sketch below lets you explore this.
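For instance, a sketch like the following (reusing the `simpleFun` and `timeFun` helpers defined above) shrinks the tensors and repeats the call many times. Every GPU call pays a fixed kernel-launch and host-device transfer overhead, so at small sizes the CPU often wins; the exact crossover point depends on your hardware.

```python
# Explore the CPU/GPU crossover: with small tensors the fixed per-call
# overhead of launching CUDA kernels dominates the parallel speedup.
small_dim = 100
many_iterations = 100
timeFun(f=simpleFun, dim=small_dim, iterations=many_iterations)                  # CPU
timeFun(f=simpleFun, dim=small_dim, iterations=many_iterations, device=DEVICE)  # GPU
```

Section 2.5: Datasets and Dataloaders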
###Code
# @title Video 7: Getting Data
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 7: Getting Data')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
###Code
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
###Output
_____no_output_____
###Markdown
**Datasets**

The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.

Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print(f"Number of samples: {len(cifar10_data)}")
print(f"Class names: {cifar10_data.classes}")
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
# Choose a random sample
random.seed(2021)
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]
print(f"Label: {cifar10_data.classes[label]}")
print(f"Image size: {image.shape}")
###Output
_____no_output_____
###Markdown
Color images are modeled as 3 dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimension is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W.

Coding Exercise 2.5: Display an image from the dataset

Let's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - $H \times W \times C$. You need to reorder the dimensions of the tensor using the `permute` method of the tensor. PyTorch `torch.permute(*dims)` rearranges the original tensor according to the desired ordering and returns a new multidimensional rotated tensor. The size of the returned tensor remains the same as that of the original.

**Code hint:**

```python
# create a tensor of size 2 x 4
input_var = torch.randn(2, 4)

# print its size and the tensor
print(input_var.size())
print(input_var)

# dimensions permuted
input_var = input_var.permute(1, 0)

# print its size and the permuted tensor
print(input_var.size())
print(input_var)
```
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
# plt.show()
# to_remove solution
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1, 2, 0))
plt.show()
#@title Video 8: Train and Test
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 8: Train and Test')
display(out)
###Output
_____no_output_____
###Markdown
**Training and Test Datasets**

When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, today we will not use both datasets separately, but this topic will be addressed in the next days.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
# @title Video 9: Data Augmentation - Transformations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 9: Data Augmentation - Transformations')
display(out)
###Output
_____no_output_____
###Markdown
**Dataloader**

Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
*Reproducibility:* DataLoader will reseed workers following the "Randomness in multi-process data loading" algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:

```python
def seed_worker(worker_id):
  worker_seed = torch.initial_seed() % 2**32
  numpy.random.seed(worker_seed)
  random.seed(worker_seed)

g_seed = torch.Generator()
g_seed.manual_seed(my_seed)

DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    worker_init_fn=seed_worker,
    generator=g_seed
    )
```

**Note:** For the `seed_worker` to have an effect, `num_workers` should be 2 or more.

We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then we can query the next batch using the function `next`.

We can now see that we have a 4D tensor. This is because we have 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
plt.show()
###Output
_____no_output_____
###Markdown
**Transformations**

Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform. Checkout the [pytorch documentation](https://pytorch.org/vision/stable/transforms.html) for details.

Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale images

The goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility.
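As a small illustration of combining transforms with `Compose` (a sketch with arbitrary transform choices; not the solution to the exercise below):

```python
from torchvision.transforms import Compose, Resize, ToTensor

# Chain two standard transforms: resize each PIL image to 24x24 pixels,
# then convert it to a tensor. Resize operates on the PIL image, so it
# must come before ToTensor.
example_transform = Compose([Resize((24, 24)), ToTensor()])

# The combined transform can then be passed to a dataset, e.g.,
# datasets.CIFAR10(root="data", download=True, transform=example_transform)
```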
###Code
def my_data_load():
###############################################
## TODO for students: load the CIFAR10 data,
## but as grayscale images and not as RGB colored.
raise NotImplementedError("Student exercise: fill in the missing code to load the data")
###############################################
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(...,
transform=...)
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# data = my_data_load()
# to_remove solution
def my_data_load():
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(root="data", download=True,
transform=Compose([ToTensor(), Grayscale()]))
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
data = my_data_load()
###Output
_____no_output_____
###Markdown
---

Section 3: Neural Networks

*Time estimate: ~1 hour 30 mins (excluding video)*

Now it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:
- Creating a simple neural network model
- Training the network
- Visualizing the results of the network
- Tweaking the network
###Code
# @title Video 10: CSV Files
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 10: CSV Files')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Data Loading

First we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
# @title Generate sample data
# @markdown we used `scikit-learn` module
from sklearn.datasets import make_moons
# Create a dataset of 256 points with a little noise
X, y = make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")
# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
plt.show()
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**

Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Initialize the device variable
DEVICE = set_device()
# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)
# Upload the tensor to the device
X = X.to(DEVICE)
print(f"Size X:{X.shape}")
# Convert the labels to a long integer tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(DEVICE)
print(f"Size y:{y.shape}")
###Output
_____no_output_____
###Markdown
Section 3.2: Create a Simple Neural Network
###Code
# @title Video 11: Generating the Neural Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 11: Generating the Neural Network')
display(out)
###Output
_____no_output_____
###Markdown
For this example we want to have a simple neural network consisting of 3 layers:
- 1 input layer of size 2 (our points have 2 coordinates)
- 1 hidden layer of size 16 (you can play with different numbers here)
- 1 output layer of size 2 (we want to have the scores for the two classes)

During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.

**Programming the Network**

PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:

`__init__`: In the `__init__` method you need to define the structure of your network. Here you will specify what layers the network will consist of, which activation functions will be used, etc.

`forward`: All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it.

`predict`: This is not an obligatory method of a neural network module, but it is good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score.

`train`: This is also not an obligatory method, but it is good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook.

> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`.
###Code
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
        nn.ReLU(),        # Activation function: ReLU is a non-linearity which is widely used because it is cheap to compute.
                          # It returns 0 if it receives any negative input, but for any positive value x, it returns that value back.
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Choose the most likely label predicted by the network
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
def train(self, X, y):
pass
###Output
_____no_output_____
###Markdown
**Check that your network works**

Create an instance of your model and visualize it.
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(DEVICE)
# Print the structure of the network
print(model)
###Output
_____no_output_____
###Markdown
Coding Exercise 3.2: Classify some samplesNow let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. The goal here is just to get some experience with the data structures that are passed to the forward and predict methods and their results.
###Code
## Get the samples
# X_samples = ...
# print("Sample input:\n", X_samples)
## Do a forward pass of the network
# output = ...
# print("\nNetwork output:\n", output)
## Predict the label of each point
# y_predicted = ...
# print("\nPredicted labels:\n", y_predicted)
# to_remove solution
## Get the samples
X_samples = X[0:5]
print("Sample input:\n", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("\nNetwork output:\n", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("\nPredicted labels:\n", y_predicted)
###Output
_____no_output_____
###Markdown
```
Sample input:
 tensor([[ 0.9066,  0.5052],
        [-0.2024,  1.1226],
        [ 1.0685,  0.2809],
        [ 0.6720,  0.5097],
        [ 0.8548,  0.5122]], device='cuda:0')

Network output:
 tensor([[ 0.1543, -0.8018],
        [ 2.2077, -2.9859],
        [-0.5745, -0.0195],
        [ 0.1924, -0.8367],
        [ 0.1818, -0.8301]], device='cuda:0', grad_fn=<AddmmBackward>)

Predicted labels:
 tensor([0, 0, 1, 0, 0], device='cuda:0')
```

Section 3.3: Train Your Neural Network
###Code
# @title Video 12: Train the Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 12: Train the Network')
display(out)
###Output
_____no_output_____
###Markdown
Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet - we will cover training in much more detail in the next days. For now, the goal is just to see your network in action!

You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we will implement it as a function outside of the class in order to have it in a separate cell.
###Code
# @title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
  # Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
# We'll use the whole dataset during the training instead of using batches
    # in order to keep the code simple for now.
y_logits = model.forward(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(model, X, y, DEVICE)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Create a new network instance a train it
model = NaiveNet().to(DEVICE)
losses = train(model, X, y)
###Output
_____no_output_____
###Markdown
**Plot the loss during training**

Plot the loss during the training to see how it decreases and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
# @title Visualize the training process
# @markdown ### Execute this cell!
!pip install imageio --quiet
!pip install pathlib --quiet
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(10):
filename = "frames/0"+str(i)+"000.png"
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
# @title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 13: Play with it')
display(out)
###Output
_____no_output_____
###Markdown
Exercise 3.3: Tweak your Network

You can now play around with the network a little bit to get a feeling for what the different parameters are doing. Here are some ideas for what you could try:
- Increase or decrease the number of epochs for training
- Increase or decrease the size of the hidden layer
- Add one additional hidden layer (see the sketch below)

Can you get the network to better fit the data?
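For instance, adding one extra hidden layer might look like the sketch below. The layer sizes are arbitrary choices to experiment with, not a recommended architecture.

```python
class DeeperNaiveNet(nn.Module):
  """A sketch of NaiveNet with one additional hidden layer."""

  def __init__(self):
    super(DeeperNaiveNet, self).__init__()
    self.layers = nn.Sequential(
        nn.Linear(2, 32),   # input -> first hidden layer
        nn.ReLU(),
        nn.Linear(32, 16),  # first hidden -> second hidden layer
        nn.ReLU(),
        nn.Linear(16, 2),   # second hidden -> output layer
    )

  def forward(self, x):
    return self.layers(x)

  def predict(self, x):
    return torch.argmax(self.forward(x), 1)

# It can be trained exactly like before:
# deeper_model = DeeperNaiveNet().to(DEVICE)
# losses = train(deeper_model, X, y)
```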
###Code
# @title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 14: XOR Widget')
display(out)
###Output
_____no_output_____
###Markdown
Exclusive OR (XOR) logical operation gives a true (`1`) output when the number of true inputs is odd. That is, a true output results if one, and only one, of the inputs to the gate is true. If both inputs are false (`0`) or both are true (`1`), a false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false.

In the case of two inputs ($X$ and $Y$) the following truth table applies:

\begin{array}{ccc}
X & Y & \text{XOR} \\
\hline
0 & 0 & 0 \\
0 & 1 & 1 \\
1 & 0 & 1 \\
1 & 1 & 0 \\
\end{array}

Here, with `0`, we denote `False`, and with `1` we denote `True` in boolean terms.

Interactive Demo 3.3: Solving XOR

Here we use an open source and famous visualization widget developed by the Tensorflow team, available [here](https://github.com/tensorflow/playground).
* Play with the widget and observe that you can not solve the continuous XOR dataset.
* Now add one hidden layer with three units, play with the widget, and set the weights by hand to solve this dataset perfectly.

For the second part, you should set the weights by clicking on the connections and either typing the value or using the up and down keys to change it by one increment. You can do the same for the biases by clicking on the tiny square to each neuron's bottom left.

Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is:

\begin{equation}
y = f(x_1) + f(x_2) - f(x_1 + x_2)
\end{equation}

Try to set the weights and biases to implement this function after you have played enough :)
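You can sanity-check this identity numerically with a quick sketch. Note that the widget's continuous XOR dataset uses signed coordinates, so the output should be positive exactly when the two inputs have opposite signs:

```python
import torch

f = torch.relu  # the hidden-layer nonlinearity

def xor_net(x1, x2):
  # y = f(x1) + f(x2) - f(x1 + x2)
  return f(x1) + f(x2) - f(x1 + x2)

for x1 in (-1.0, 1.0):
  for x2 in (-1.0, 1.0):
    y = xor_net(torch.tensor(x1), torch.tensor(x2))
    print(f"x1={x1:+.0f}, x2={x2:+.0f} -> y={y.item():.0f}")
# y is 1 when the signs differ and 0 when they match
```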
###Code
# @markdown ###Play with the parameters to solve XOR
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor®Dataset=reg-plane&learningRate=0.03®ularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
# @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
###Output
_____no_output_____
###Markdown
---

Section 4: Ethics And Course Info
###Code
# @title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 17: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Meet our lecturers:

Week 1: the building blocks
* [Konrad Kording](https://kordinglab.com)
* [Andrew Saxe](https://www.saxelab.org/)
* [Surya Ganguli](https://ganguli-gang.stanford.edu/)
* [Ioannis Mitliagkas](http://mitliagkas.github.io/)
* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/)

Week 2: making things work
* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)
* [Alexander Ecker](https://eckerlab.org/)
* [James Evans](https://sociology.uchicago.edu/directory/james-evans)
* [He He](https://hhexiy.github.io/)
* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/)

Week 3: more magic
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Jane Wang](http://www.janexwang.com/) and [Feryal Behbahani](https://feryal.github.io/)
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lomonaco](https://www.vincenzolomonaco.com/)

Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map?

---

Submit to Airtable
###Code
# @title Video 18: Submission info
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This is Darryl, the Deep Learning Dapper Lion, and he's here to teach you about content submission to Airtable. At the end of each tutorial there will be an Airtable Submission Cell. Run the cell to generate the Airtable submission button and click on it to submit your information to Airtable. If it is the last tutorial of the day, your button will also take you to the end of day survey.

It is critical that you push the submit button for every tutorial you run. Even if you don't finish the tutorial, still submit! Submitting is the only way we can verify that you attempted each tutorial, which is critical for us to be able to track your progress.

TL;DR: Basic tutorial workflow
1. Work through the tutorial, answering Think! questions and code exercises.
2. At the end of each tutorial (even if the tutorial is incomplete), run the Airtable submission code cell.
3. Push the submission button.
4. If it is the last tutorial of the day, the submission button will also take you to the end of the day survey on a new page. Complete that and submit it.

Submission FAQs:

1. What if I want to change my answers to previous discussion questions?
> You are free to change and resubmit any of the answers and Think! questions as many times as you like. However, please only run the Airtable submission code and click on the link once you are ready to submit.

2. Okay, but what if I submitted my Airtable anyway and really want to resubmit?
> After making changes, you can re-run the Airtable submission code cell. This will result in a second submission from you for the data. This will make Darryl sad, as it will be more work for him to clean up the data later.

3. HELP! I accidentally ran the code to generate the Airtable submission button before I was ready to submit! What do I do?
> If you run the code to generate the link, anything that happens afterwards will not be captured. Complete the tutorial and make sure to re-run the Airtable submission again when you are finished, before pressing the submission button.

4. What if I want to work on this on my own later, should I wait to submit until I'm finished?
> Please submit wherever you are at the end of the day. It's great that you want to keep working on this, but it's important to see the places where we tried things that didn't quite work out, so we can fix them for next year.

Finally, we try to keep the Airtable code as hidden as possible, but if you ever see any calls to `atform` such as `atform.add_event()` in the coding exercises, just know that is for saving Airtable information only. It will not affect the code that is being run around it in any way, so please do not modify, comment out, or worry about any of those lines of code.

Now, let's try submitting today's course to Airtable by running the next cell and clicking the button when it appears.
###Code
# @title Airtable Submission Link
from IPython import display as IPyDisplay
IPyDisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link to survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
---

Bonus - 60 years of Machine Learning Research in one Plot

by [Hendrik Strobelt](http://hendrik.strobelt.com) (MIT-IBM Watson AI Lab) with support from Benjamin Hoover.

In this notebook we visualize a subset* of 3,300 articles retrieved from the AllenAI [S2ORC dataset](https://github.com/allenai/s2orc). We represent each paper by a position that is the output of a dimensionality reduction method applied to a vector representation of each paper. The vector representation is the output of a neural network.

*The selection is very biased by the keywords and methodology we used to filter. Please see the details section to learn about what we did.
###Code
# @title Import `altair` and load the data
!pip install altair vega_datasets --quiet
import requests
import altair as alt  # altair is used for defining the data visualizations
# Source data files
# Position data file maps ID to x,y positions
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc.pos_umap_cosine_100_d0.1.json
POS_FILE = 'https://osf.io/qyrfn/download'
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc_clean.csv
# Metadata file maps ID to title, abstract, author,....
META_FILE = 'https://osf.io/vfdu6/download'
# data loading and wrangling
def load_data():
positions = pd.read_json(POS_FILE)
positions[['x', 'y']] = positions['pos'].to_list()
meta = pd.read_csv(META_FILE)
return positions.merge(meta, left_on='id', right_on='paper_id')
# load data
data = load_data()
# @title Define Visualization using Altair
YEAR_PERIOD = "quinquennial" # @param
selection = alt.selection_multi(fields=[YEAR_PERIOD], bind='legend')
data[YEAR_PERIOD] = (data["year"] / 5.0).apply(np.floor) * 5
chart = alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count"]], width=800,
height=800).mark_circle(radius=2, opacity=0.2).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False, clamp=True, domain=list(range(1955,2020,5))),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
# size='citation_count',
# color="decade:O",
opacity=alt.condition(selection, alt.value(.8), alt.value(0.2)),
).add_selection(
selection
).interactive()
###Output
_____no_output_____
###Markdown
Let's look at the visualization. Each dot represents one paper. Close dots mean that the respective papers are more closely related than distant ones. The color indicates the 5-year period of when the paper was published. The dot size indicates the citation count (within the S2ORC corpus) as of July 2020.

The view is **interactive** and allows for three main interactions. Try them and play around.
1. Hover over a dot to see a tooltip (title, author).
2. Select a year in the legend (right) to filter dots.
3. Zoom in/out with scroll -- double click resets the view.
###Code
chart
###Output
_____no_output_____
###Markdown
Questions

By playing around, can you find some answers to the following questions?
1. Can you find topical clusters? What cluster might occur because of a filtering error?
2. Can you see a temporal trend in the data and clusters?
3. Can you determine when deep learning methods started booming?
4. Can you find the key papers that were written before the DL "winter" that define milestones for a cluster? (tip: look for large dots of different color)

Methods

Here is what we did:
1. Filtering of all papers that fulfilled the criteria:
   - are categorized as `Computer Science` or `Mathematics`
   - one of the following keywords appearing in title or abstract: `"machine learning|artificial intelligence|neural network|(machine|computer) vision|perceptron|network architecture| RNN | CNN | LSTM | BLEU | MNIST | CIFAR |reinforcement learning|gradient descent| Imagenet "`
2. Per year, remove all papers that are below the 99th percentile of citation count in that year.
3. Embed each paper by using abstract+title in the SPECTER model.
4. Project based on the embedding using UMAP.
5. Visualize using Altair.

Find Authors
###Code
# @title Edit the `AUTHOR_FILTER` variable to full text search for authors.
AUTHOR_FILTER = "Rush " # @param space at the end means "word border"
### Don't ignore case when searching...
FLAGS = 0
### uncomment to ignore case
# FLAGS = re.IGNORECASE
## --- FILTER CODE.. make it your own ---
import re
data['issel'] = data['authors'].str.contains(AUTHOR_FILTER, na=False, flags=FLAGS)
if data['issel'].mean() < 1e-10:
  print('No match found')
## --- FROM HERE ON VIS CODE ---
alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count", "issel"]], width=800,
height=800) \
.mark_circle(stroke="black", strokeOpacity=1).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.StrokeWidth('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[0, 2]), legend=None),
alt.Opacity('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[.2, 1]), legend=None),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
).interactive()
###Output
_____no_output_____
###Markdown
Tutorial 1: PyTorch

**Week 1, Day 1: Basics and PyTorch**

**By Neuromatch Academy**

__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording

__Content reviewers:__ Deepak Raya, Siwei Bai, Kelson Shilling-Scrivo

__Content editors:__ Anoop Kulkarni, Spiros Chavlis

__Production editors:__ Arush Tagade, Spiros Chavlis

**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**

---

Tutorial Objectives

We have a few specific objectives for this tutorial:
* Learn about PyTorch and tensors
* Tensor manipulations
* Data loading
* GPUs and CUDA tensors
* Train NaiveNet
* Get to know your pod
* Start thinking about the course as a whole
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
---

Setup

Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy), set global or environment variables, and load in helper functions for things like plotting. In some tutorials, you will notice that we install some dependencies even if they are preinstalled on Google Colab or Kaggle. This happens because we have added automation to our repository through [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions).

Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.

If you start building your own projects on this code base, we highly recommend looking at them in more detail.
###Code
# @title Install dependencies
!pip install pandas --quiet
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
# @title Figure Settings
import ipywidgets as widgets
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# @title Helper Functions
atform = AirtableForm('appn7VdPRseSoMXEG','W1D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/97e94a29-0b3a-4e16-9a8d-f6838a5bd83d')
def checkExercise1(A, B, C, D):
"""
Helper function for checking exercise.
Args:
A: torch.Tensor
B: torch.Tensor
C: torch.Tensor
D: torch.Tensor
Returns:
Nothing.
"""
errors = []
# TODO better errors and error handling
if not torch.equal(A.to(int),torch.ones(20, 21).to(int)):
errors.append(f"Got: {A} \n Expected: {torch.ones(20, 21)} (shape: {torch.ones(20, 21).shape})")
if not np.array_equal( B.numpy(),np.vander([1, 2, 3], 4)):
errors.append("B is not a tensor containing the elements of Z ")
if C.shape != (20, 21):
errors.append("C is not the correct shape ")
if not torch.equal(D, torch.arange(4, 41, step=2)):
errors.append("D does not contain the correct elements")
if errors == []:
print("All correct!")
else:
[print(e) for e in errors]
def timeFun(f, dim, iterations, device='cpu'):
iterations = iterations
t_total = 0
for _ in range(iterations):
start = time.time()
f(dim, device)
end = time.time()
t_total += end - start
if device == 'cpu':
print(f"time taken for {iterations} iterations of {f.__name__}({dim}, {device}): {t_total:.5f}")
else:
print(f"time taken for {iterations} iterations of {f.__name__}({dim}, {device}): {t_total:.5f}")
###Output
_____no_output_____
###Markdown
**Important note: Google Colab users**

*Scratch Code Cells*

If you want to quickly try out something or take a look at the data you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook. To open a new scratch cell go to *Insert* → *Scratch code cell*.

Section 1: Welcome to Neuromatch Deep learning course

*Time estimate: ~25mins*
###Code
# @title Video 1: Welcome and History
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing
atform.add_event('Video 1: Welcome and History')
display(out)
###Output
_____no_output_____
###Markdown
This will be an intensive 3-week adventure. We will all learn Deep Learning. In a group. Groups need standards. Read our [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing).
###Code
# @title Video 2: Why DL is cool
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 2: Why DL is cool')
display(out)
###Output
_____no_output_____
###Markdown
**Describe what you hope to get out of this course in about 100 words.** --- Section 2: The Basics of PyTorch *Time estimate: ~2 hours 05 mins* PyTorch is a Python-based scientific computing package targeted at two sets of audiences: - A replacement for NumPy to use the power of GPUs - A deep learning platform that provides significant flexibility and speed. At its core, PyTorch provides a few key features: - A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration. - An optimized **autograd** engine for automatically computing derivatives. - A clean, modular API for building and deploying **deep learning models**. You can find more information about PyTorch in the appendix. Section 2.1: Creating Tensors
###Code
# @title Video 3: Making Tensors
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 3: Making Tensors')
display(out)
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so. **Construct tensors directly:**---
###Code
# we can construct a tensor directly from some common python iterables,
# such as lists and tuples; nested iterables can also be handled, as long
# as the dimensions make sense
# tensor from a list
a = torch.tensor([0, 1, 2])
#tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
###Output
_____no_output_____
###Markdown
**Some common tensor constructors:**---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1, 5)
print(f"Tensor x: {x}")
print(f"Tensor y: {y}")
print(f"Tensor z: {z}")
###Output
_____no_output_____
###Markdown
Notice that ```.empty()``` does not return zeros, but seemingly arbitrary values: unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence a bit faster if you are looking to just create a tensor.
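A minimal sketch substantiating the speed claim (the timings are illustrative and will vary by machine):

```python
import timeit
import torch

# .empty() only allocates memory; .zeros() allocates and writes zeros
t_empty = timeit.timeit(lambda: torch.empty(1000, 1000), number=100)
t_zeros = timeit.timeit(lambda: torch.zeros(1000, 1000), number=100)
print(f".empty(): {t_empty:.4f}s  .zeros(): {t_zeros:.4f}s  (100 allocations each)")
```

**Creating random tensors and tensors like other tensors:**---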
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
print(f"Tensor d: {d}")
###Output
_____no_output_____
###Markdown
*Reproducibility*: - PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA)

```python
import torch
torch.manual_seed(0)
```

- For custom operators, you might need to set python seed as well:

```python
import random
random.seed(0)
```

- Random number generators in other libraries:

```python
import numpy as np
np.random.seed(0)
```

Here, we define for you a function called `set_seed` that does the job for you!
###Code
def set_seed(seed=None, seed_torch=True):
"""
Function that controls randomness. NumPy and random modules must be imported.
Args:
seed : Integer
A non-negative integer that defines the random state. Default is `None`.
seed_torch : Boolean
If `True` sets the random seed for pytorch tensors, so pytorch module
must be imported. Default is `True`.
Returns:
Nothing.
"""
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
###Output
_____no_output_____
###Markdown
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
###Code
def simplefun(seed=True, my_seed=None):
if seed:
set_seed(seed=my_seed)
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
print("Tensor a: ", a)
print("Tensor b: ", b)
simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed`
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**---The ```.arange()``` and ```.linspace()``` methods behave how you would expect them to if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Creating TensorsBelow you will find some incomplete code. Fill in the missing code to construct the specified tensors.We want the tensors: $A:$ 20 by 21 tensor consisting of ones$B:$ a tensor with elements equal to the elements of numpy array $Z$$C:$ a tensor with the same number of elements as $A$ but with values $\sim U(0,1)$$D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive.
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## from the first expression
raise NotImplementedError("Student exercise: say what they should have done")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
```All correct!``` Section 2.2: Operations in PyTorch
###Code
# @title Video 4: Tensor Operators
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 4: Tensor Operators')
display(out)
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
# this only works if c and d already exist
torch.add(a, b, out=c)
#Pointwise Multiplication of a and b
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overridden. The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations.
###Code
x = torch.tensor([1, 2, 4, 8])
y = torch.tensor([1, 2, 3, 4])
x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation
###Output
_____no_output_____
###Markdown
**Tensor Methods** Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!) All of these operations should have similar syntax to their numpy equivalents.(Feel free to skip if you already know this!)
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
###Output
_____no_output_____
###Markdown
**Matrix Operations**The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot products, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.T```. Note the lack of brackets for ```Tensor.T``` - it is an attribute, not a method. Coding Exercise 2.2 : Simple tensor operationsBelow are two expressions involving operations on matrices. $$ \textbf{A} = \begin{bmatrix}2 &4 \\5 & 7 \end{bmatrix} \begin{bmatrix} 1 &1 \\2 & 3\end{bmatrix} + \begin{bmatrix}10 & 10 \\ 12 & 1 \end{bmatrix} $$and$$ b = \begin{bmatrix} 3 \\ 5 \\ 7\end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8\end{bmatrix}$$The code block below that computes these expressions using PyTorch is incomplete - fill in the missing lines.
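Before the exercise code, a quick sketch of the operators described above (a minimal illustration with arbitrary values, separate from the exercise):

```python
import torch

M = torch.tensor([[1., 2.], [3., 4.]])
N = torch.tensor([[0., 1.], [1., 0.]])
v = torch.tensor([1., 2.])
w = torch.tensor([3., 4.])

print(M @ N)               # matrix multiplication via the overridden @ operator
print(torch.matmul(M, N))  # the equivalent functional form
print(torch.dot(v, w))     # dot product of two 1D tensors
print(torch.t(M))          # transpose of a 2D tensor
print(M.T)                 # the same transpose, as an attribute
```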
###Code
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
################################################
## TODO for students: complete the first computation using the argument matricies
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
################################################
  # multiply tensor a1 with tensor a2 and then add tensor a3
answer = ...
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# A = simple_operations(a1, a2, a3)
# print(A)
# to_remove solution
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
  # multiply tensor a1 with tensor a2 and then add tensor a3
answer = a1 @ a2 + a3
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
A = simple_operations(a1, a2, a3)
print(A)
###Output
_____no_output_____
###Markdown
```tensor([[20, 24], [31, 27]])```
###Code
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
###############################################
## TODO for students: complete the first computation using the argument matricies
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
# Use torch.dot() to compute the dot product of two tensors
product = ...
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# b = dot_product(b1, b2)
# print(b)
# to_remove solution
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
# Use torch.dot() to compute the dot product of two tensors
product = torch.dot(b1, b2)
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
b = dot_product(b1, b2)
print(b)
###Output
_____no_output_____
###Markdown
```tensor(82)``` Section 2.3: Manipulating Tensors in PyTorch
###Code
# @title Video 5: Tensor Indexing
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 5: Tensor Indexing')
display(out)
###Output
_____no_output_____
###Markdown
**Indexing**Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0 and ranges are specified to include the first element but exclude the last. We can access elements according to their relative position to the end of the list by using negative indices. Indexing is also referred to as slicing. For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as numpy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
###Output
_____no_output_____
###Markdown
**Flatten and reshape**There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")
# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")
# and back to 2D
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
###Output
_____no_output_____
###Markdown
You will also see the ```.view()``` method used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()```; a short sketch of the difference follows below. The documentation can be found in the appendix. **Squeezing tensors**When processing batches of data, you will quite often be left with singleton dimensions, e.g., [1, 10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there... In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.
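A minimal sketch of the ```.view()``` vs ```.reshape()``` difference mentioned above (the squeeze examples continue in the next cell): ```.view()``` requires the underlying memory to be contiguous, while ```.reshape()``` will copy when it has to.

```python
import torch

x = torch.arange(6).reshape(2, 3)
y = x.t()                  # transposing makes the tensor non-contiguous
print(y.is_contiguous())   # False
print(y.reshape(6))        # fine: .reshape() copies when necessary
# y.view(6)                # would raise a RuntimeError on a non-contiguous tensor
```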
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print(f"x[0]: {x[0]}")
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, x[0] gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")
# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")
# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
###Output
_____no_output_____
###Markdown
**Permutation**Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension i.e. [48x64x3]. To get around this we can use ```.permute()```
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
###Output
_____no_output_____
###Markdown
You may also see ```.transpose()``` used. This works in a similar way as permute, but can only swap two dimensions at once. **Concatenation** In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor's axis-0 length (6) is the sum of the two input tensors' axis-0 lengths (3+3), while the second output tensor's axis-1 length (8) is the sum of the two input tensors' axis-1 lengths (4+4).
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
#concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by colums: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
###Markdown
**Conversion to Other Python Objects**Converting to a NumPy array, or vice versa, is easy, but be aware of the memory semantics: `Tensor.numpy()` returns an array that shares memory with the (CPU) tensor, whereas `torch.tensor()` always copies its input. The copy is a minor inconvenience but actually quite important: when you perform operations on the CPU or on GPUs, you do not want to halt computation waiting to see whether the NumPy side might be doing something else with the same chunk of memory. When converting to a numpy array, the information being tracked by the tensor will be lost, i.e., the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
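A minimal sketch of the memory behaviour described above (CPU tensors only):

```python
import torch

x = torch.zeros(3)
a = x.numpy()          # .numpy() on a CPU tensor shares memory with it
x[0] = 42.0
print(a)               # [42.  0.  0.] - the change is visible in the array

b = torch.tensor(a)    # torch.tensor() always copies its input
a[1] = -1.0
print(b)               # tensor([42., 0., 0.]) - the copy is unaffected
```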
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the ```item()``` method or Python's built-in conversion functions.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.3: Manipulating TensorsUsing a combination of the methods discussed above, complete the functions below. **Function A** This function takes in two 2D tensors $A$ and $B$ and returns the column sum of $A$ multiplied by the sum of all the elements of $B$ i.e. a scalar, e.g.,:$ A = \begin{bmatrix}1 & 1 \\1 & 1 \end{bmatrix} \,$and$ B = \begin{bmatrix}1 & 2 & 3\\1 & 2 & 3 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix} 2 & 2 \\\end{bmatrix} \cdot 12 = \begin{bmatrix}24 & 24\\\end{bmatrix}$**Function B** This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.,:$ C = \begin{bmatrix}2 & 3 \\-1 & 10 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix}0 & 2 \\1 & 3 \\2 & -1 \\3 & 10\end{bmatrix}$**Hint:** pay close attention to singleton dimensions**Function C**This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $D$-shaped $E$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors, e.g.,:$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix} \,$and $ E = \begin{bmatrix}2 & 3 & 0 & 2 \\\end{bmatrix} \, $so$ \, Out = \begin{bmatrix}3 & 2 \\-1 & 5 \end{bmatrix}$$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$and$ \, E = \begin{bmatrix}2 & 3 & 0 \\\end{bmatrix} \,$so$ \, Out = \begin{bmatrix}1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix}$**Hint:** `torch.numel()` is an easy way of finding the number of elements in a tensor
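A quick illustration of the hint:

```python
import torch

x = torch.ones(2, 3)
print(torch.numel(x))  # 6: the total number of elements, regardless of shape
print(x.numel())       # the same, as a method
```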
###Code
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
  `my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
################################################
## TODO for students: complete functionA
raise NotImplementedError("Student exercise: complete function A")
################################################
# TODO multiplication the sum of the tensors
output = ...
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function B")
################################################
# TODO flatten the tensor `my_tensor`
my_tensor = ...
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
  elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
  ## TODO for students: complete functionC
raise NotImplementedError("Student exercise: complete function C")
################################################
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if ...:
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
my_tensor1 = ...
my_tensor2 = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
# to_remove solution
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
  `my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
  # TODO multiply the column sum of my_tensor1 by the sum of my_tensor2
output = my_tensor1.sum(axis=0) * my_tensor2.sum()
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO flatten the tensor `my_tensor`
my_tensor = my_tensor.flatten()
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = torch.arange(0, len(my_tensor))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), my_tensor.unsqueeze(1)], axis=1)
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
  elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if torch.numel(my_tensor1) == torch.numel(my_tensor2):
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = my_tensor2.reshape(my_tensor1.shape)
# TODO sum the two tensors
output = my_tensor1 + my_tensor2
else:
# TODO flatten both tensors
my_tensor1 = my_tensor1.reshape(1, -1)
my_tensor2 = my_tensor2.reshape(1, -1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([my_tensor1, my_tensor2], axis=1).squeeze()
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
###Output
_____no_output_____
###Markdown
```
tensor([24, 24])
tensor([[ 0,  2],
        [ 1,  3],
        [ 2, -1],
        [ 3, 10]])
tensor([[ 3,  2],
        [-1,  5]])
tensor([ 1, -1, -1,  3,  2,  3,  0])
```
Section 2.4: GPUs
###Code
# @title Video 6: GPU vs CPU
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 6: GPU vs CPU')
display(out)
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
When using Colab notebooks, you will not have access to a GPU by default. In order to start using GPUs we need to request one. We can do this by going to the runtime tab at the top of the page. By following Runtime -> Change runtime type and selecting "GPU" from the Hardware Accelerator dropdown list, we can start playing with sending tensors to GPUs. Once you have done this your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell. (For more information on the GPU usage policy, see the appendix.) **Now we have a GPU** The cell below should return True.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction, and allows us to launch CUDA kernels using pure Python. In short, we get the power of parallelising our tensor computations on GPUs, whilst only writing (relatively) simple Python!Here, we define the function `set_device`, which returns the device used in the notebook, i.e., `cpu` or `cuda`. Unless otherwise specified, we use this function on top of every tutorial, and we store the device variable such as

```python
DEVICE = set_device()
```

Let's define the function using the PyTorch package `torch.cuda`, which is lazily initialized, so we can always import it, and use `is_available()` to determine if our system supports CUDA.
###Code
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
###Output
_____no_output_____
###Markdown
Let's make some CUDA tensors!
###Code
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between cpu tensors and cuda tensors**Note that the type of the tensor changed after calling ```.to()```. What happens if we try to perform operations on tensors that live on different devices?
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
# Uncomment the following line and run this cell
# z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the `.to()` method as before, or the `.cpu()` and `.cuda()` methods. Note that calling `.cuda()` will throw an error if CUDA is not available on your machine. Generally in this course, all deep learning runs on the GPU while other computation runs on the CPU, so sometimes we have to pass things back and forth; you will see these calls throughout.
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)
# moving to cpu
x = x.to("cpu") # alternatively, you can use x = x.cpu()
print(x + y)
# moving to gpu
y = y.to(DEVICE) # alternatively, you can use y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.4: Just how much faster are GPUs?Below is a simple function `simpleFun`. Complete this function, such that it performs the operations: - elementwise multiplication - matrix multiplication. The operations should be able to be performed on either the CPU or GPU, as specified by the parameter `device`. We will use the helper function `timeFun(f, dim, iterations, device)`.
###Code
dim = 10000
iterations = 1
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
###############################################
## TODO for students: recreate the function, but
## ensure all computations happens on the `device`
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
###############################################
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
x = ...
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
y = ...
# 2D tensor filled with the scalar value 2, dim x dim
z = ...
# elementwise multiplication of x and y
a = ...
  # matrix multiplication of x and z
b = ...
del x
del y
del z
del a
del b
## TODO: Implement the function above and uncomment the following lines to test your code
# timeFun(f=simpleFun, dim=dim, iterations=iterations)
# timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
# to_remove solution
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
x = torch.rand(dim, dim).to(device)
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
y = torch.rand_like(x).to(device)
# 2D tensor filled with the scalar value 2, dim x dim
z = 2*torch.ones(dim, dim).to(device)
# elementwise multiplication of x and y
a = x * y
  # matrix multiplication of x and z
b = x @ z
del x
del y
del z
del a
del b
## TODO: Implement the function above and uncomment the following lines to test your code
timeFun(f=simpleFun, dim=dim, iterations=iterations)
timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
###Output
_____no_output_____
###Markdown
Sample output (depends on your hardware):

```
time taken for 1 iterations of simpleFun(10000, cpu): 23.74070
time taken for 1 iterations of simpleFun(10000, cuda): 0.87535
```

**Discuss!**Try and reduce the dimensions of the tensors and increase the iterations. You can get to a point where the cpu-only function is faster than the GPU function. Why might this be? Section 2.5: Datasets and Dataloaders
###Code
# @title Video 7: Getting Data
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 7: Getting Data')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
###Code
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
###Output
_____no_output_____
###Markdown
**Datasets**The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print(f"Number of samples: {len(cifar10_data)}")
print(f"Class names: {cifar10_data.classes}")
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
# Choose a random sample
random.seed(2021)
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]
print(f"Label: {cifar10_data.classes[label]}")
print(f"Image size: {image.shape}")
###Output
_____no_output_____
###Markdown
Color images are modeled as 3-dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimension is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W. Coding Exercise 2.5: Display an image from the datasetLet's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - $H \times W \times C$. You need to reorder the dimensions of the tensor using the `permute` method of the tensor. PyTorch `torch.permute(*dims)` rearranges the original tensor according to the desired ordering and returns a new tensor with the dimensions permuted. The size of the returned tensor remains the same as that of the original.**Code hint:**

```python
# create a tensor of size 2 x 4
input_var = torch.randn(2, 4)
# print its size and the tensor
print(input_var.size())
print(input_var)
# dimensions permuted
input_var = input_var.permute(1, 0)
# print its size and the permuted tensor
print(input_var.size())
print(input_var)
```
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
# plt.show()
# to_remove solution
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1, 2, 0))
plt.show()
#@title Video 8: Train and Test
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 8: Train and Test')
display(out)
###Output
_____no_output_____
###Markdown
**Training and Test Datasets**When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, today we will not use the two datasets separately, but this topic will be addressed in the next days.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
# @title Video 9: Data Augmentation - Transformations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 9: Data Augmentation - Transformations')
display(out)
###Output
_____no_output_____
###Markdown
**Dataloader**Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
*Reproducibility:* DataLoader will reseed workers following the Randomness in multi-process data loading algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:

```python
def seed_worker(worker_id):
  worker_seed = torch.initial_seed() % 2**32
  numpy.random.seed(worker_seed)
  random.seed(worker_seed)

g_seed = torch.Generator()
g_seed.manual_seed(my_seed)

DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    worker_init_fn=seed_worker,
    generator=g_seed
    )
```

**Note:** For the `seed_worker` to have an effect, `num_workers` should be 2 or more. We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then we can query the next batch using the function `next`. We can now see that we have a 4D tensor. This is because we have 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
plt.show()
###Output
_____no_output_____
###Markdown
**Transformations**Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform. Check out the [pytorch documentation](https://pytorch.org/vision/stable/transforms.html) for details; a short sketch of composing transforms appears just below, before the exercise code. Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale imagesThe goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility.
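As a short sketch of composing transforms (using `Normalize` purely for illustration; the mean/std values below are placeholders, not statistics computed from CIFAR10):

```python
from torchvision import datasets
from torchvision.transforms import Compose, ToTensor, Normalize

# chain transforms: convert the PIL image to a tensor, then normalize each channel
transform = Compose([
    ToTensor(),
    Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
])
normalized_data = datasets.CIFAR10(root="data", download=True, transform=transform)
image, label = normalized_data[0]
print(image.min(), image.max())  # values now lie roughly in [-1, 1]
```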
###Code
def my_data_load():
###############################################
## TODO for students: load the CIFAR10 data,
## but as grayscale images and not as RGB colored.
raise NotImplementedError("Student exercise: fill in the missing code to load the data")
###############################################
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(...,
transform=...)
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# data = my_data_load()
# to_remove solution
def my_data_load():
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(root="data", download=True,
transform=Compose([ToTensor(), Grayscale()]))
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
data = my_data_load()
###Output
_____no_output_____
###Markdown
--- Section 3: Neural Networks*Time estimate: ~1 hour 30 mins (excluding movie)* Now it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:- Creating a simple neural network model- Training the network- Visualizing the results of the network- Tweaking the network
###Code
# @title Video 10: CSV Files
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 10: CSV Files')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Data LoadingFirst we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
# @title Generate sample data
# @markdown we used `scikit-learn` module
from sklearn.datasets import make_moons
# Create a dataset of 256 points with a little noise
X, y = make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")
# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
plt.show()
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Initialize the device variable
DEVICE = set_device()
# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)
# Upload the tensor to the device
X = X.to(DEVICE)
print(f"Size X:{X.shape}")
# Convert the labels to a long integer tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(DEVICE)
print(f"Size y:{y.shape}")
###Output
_____no_output_____
###Markdown
Section 3.2: Create a Simple Neural Network
###Code
# @title Video 11: Generating the Neural Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 11: Generating the Neural Network')
display(out)
###Output
_____no_output_____
###Markdown
For this example we want to have a simple neural network consisting of 3 layers:- 1 input layer of size 2 (our points have 2 coordinates)- 1 hidden layer of size 16 (you can play with different numbers here)- 1 output layer of size 2 (we want to have the scores for the two classes)During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.**Programming the Network**PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:`__init__`In the `__init__` method you need to define the structure of your network. Here you will specify what layers the network will consist of, what activation functions will be used etc.`forward`All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it.`predict`This is not an obligatory method of a neural network module, but it is a good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score.`train`This is also not an obligatory method, but it is a good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook.> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`.
###Code
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
nn.ReLU(), # Activation function (ReLU) is a non-linearity which is widely used because it reduces computation. The function returns 0 if it receives any
# negative input, but for any positive value x, it returns that value back.
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Choose the most likely label predicted by the network
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
def train(self, X, y):
pass
###Output
_____no_output_____
###Markdown
**Check that your network works**Create an instance of your model and visualize it
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(DEVICE)
# Print the structure of the network
print(model)
###Output
_____no_output_____
###Markdown
Coding Exercise 3.2: Classify some samplesNow let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. The goal here is just to get some experience with the data structures that are passed to the forward and predict methods and their results.
###Code
## Get the samples
# X_samples = ...
# print("Sample input:\n", X_samples)
## Do a forward pass of the network
# output = ...
# print("\nNetwork output:\n", output)
## Predict the label of each point
# y_predicted = ...
# print("\nPredicted labels:\n", y_predicted)
# to_remove solution
## Get the samples
X_samples = X[0:5]
print("Sample input:\n", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("\nNetwork output:\n", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("\nPredicted labels:\n", y_predicted)
###Output
_____no_output_____
###Markdown
```Sample input: tensor([[ 0.9066, 0.5052], [-0.2024, 1.1226], [ 1.0685, 0.2809], [ 0.6720, 0.5097], [ 0.8548, 0.5122]], device='cuda:0')Network output: tensor([[ 0.1543, -0.8018], [ 2.2077, -2.9859], [-0.5745, -0.0195], [ 0.1924, -0.8367], [ 0.1818, -0.8301]], device='cuda:0', grad_fn=)Predicted labels: tensor([0, 0, 1, 0, 0], device='cuda:0')``` Section 3.3: Train Your Neural Network
###Code
# @title Video 12: Train the Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 12: Train the Network')
display(out)
###Output
_____no_output_____
###Markdown
Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet - we will cover training in much more detail in the next days. For now, the goal is just to see your network in action!You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we will implement it as a function outside of the class in order to have it in a separate cell.
###Code
# @title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
  # Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
# We'll use the whole dataset during the training instead of using batches
    # in order to keep the code simple for now.
y_logits = model.forward(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(model, X, y, DEVICE)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Create a new network instance and train it
model = NaiveNet().to(DEVICE)
losses = train(model, X, y)
###Output
_____no_output_____
###Markdown
**Plot the loss during training**Plot the loss during the training to see how it reduces and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
# @title Visualize the training process
# @markdown ### Execute this cell!
!pip install imageio --quiet
!pip install pathlib --quiet
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(10):
filename = "frames/0"+str(i)+"000.png"
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
# @title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 13: Play with it')
display(out)
###Output
_____no_output_____
###Markdown
Exercise 3.3: Tweak your NetworkYou can now play around with the network a little bit to get a feeling for what the different parameters do. Here are some ideas of what you could try:- Increase or decrease the number of epochs for training- Increase or decrease the size of the hidden layer- Add one additional hidden layerCan you get the network to better fit the data? A sketch of one possible tweak follows below.
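Here is a minimal sketch of one such tweak: a wider network with one extra hidden layer. The layer sizes are illustrative assumptions, not a reference solution, and it reuses the `nn`, `torch`, `DEVICE`, and `train` names defined above.

```python
class TweakedNet(nn.Module):
  def __init__(self):
    super().__init__()
    self.layers = nn.Sequential(
        nn.Linear(2, 32),   # wider first hidden layer (size is an assumption)
        nn.ReLU(),
        nn.Linear(32, 32),  # one additional hidden layer
        nn.ReLU(),
        nn.Linear(32, 2),   # two output classes, as before
    )

  def forward(self, x):
    # Pass the data through the layers
    return self.layers(x)

  def predict(self, x):
    # Choose the label with the highest score
    return torch.argmax(self.forward(x), 1)

# model = TweakedNet().to(DEVICE)
# losses = train(model, X, y)  # reuse the train function from above
```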
###Code
# @title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 14: XOR Widget')
display(out)
###Output
_____no_output_____
###Markdown
The exclusive OR (XOR) logical operation gives a true (`1`) output when the number of true inputs is odd. That is, a true output results if one, and only one, of the inputs to the gate is true. If both inputs are false (`0`) or both are true (`1`), a false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false.

In the case of two inputs ($X$ and $Y$) the following truth table applies:

\begin{array}{ccc}
X & Y & \text{XOR} \\
\hline
0 & 0 & 0 \\
0 & 1 & 1 \\
1 & 0 & 1 \\
1 & 1 & 0 \\
\end{array}

Here, with `0` we denote `False`, and with `1` we denote `True` in boolean terms.

Interactive Demo 3.3: Solving XOR

Here we use an open-source and famous visualization widget developed by the TensorFlow team, available [here](https://github.com/tensorflow/playground).

* Play with the widget and observe that you cannot solve the continuous XOR dataset.
* Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly.

For the second part, you should set the weights by clicking on the connections and either typing the value or using the up and down keys to change it by one increment. You can do the same for the biases by clicking on the tiny square at each neuron's bottom left.

Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is:

\begin{equation}
y = f(x_1) + f(x_2) - f(x_1 + x_2)
\end{equation}

Try to set the weights and biases to implement this function after you have played enough :)
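As a quick sanity check of this identity, the sketch below evaluates $y = f(x_1)+f(x_2)-f(x_1+x_2)$ with $f=\mathrm{ReLU}$ on sign-coded inputs; the $\pm 1$ coding is our assumption, mirroring the playground's continuous XOR, where the label depends on the signs of the two coordinates.

```python
import torch

f = torch.relu  # f(x) = max(0, x)

# evaluate y = f(x1) + f(x2) - f(x1 + x2) on the four sign combinations
for x1 in (-1.0, 1.0):
  for x2 in (-1.0, 1.0):
    t1, t2 = torch.tensor(x1), torch.tensor(x2)
    y = f(t1) + f(t2) - f(t1 + t2)
    print(f"x1={x1:+.0f}, x2={x2:+.0f} -> y={y.item():.0f}")
# prints 1 exactly when the signs differ (XOR-true) and 0 otherwise
```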
###Code
# @markdown ###Play with the parameters to solve XOR
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor&regDataset=reg-plane&learningRate=0.03&regularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
# @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
###Output
_____no_output_____
###Markdown
--- Section 4: Ethics And Course Info
###Code
# @title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 17: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Meet our lecturers:Week 1: the building blocks* [Konrad Kording](https://kordinglab.com)* [Andrew Saxe](https://www.saxelab.org/)* [Surya Ganguli](https://ganguli-gang.stanford.edu/)* [Ioannis Mitliagkas](http://mitliagkas.github.io/)* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/)Week 2: making things work* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)* [Alexander Ecker](https://eckerlab.org/)* [James Evans](https://sociology.uchicago.edu/directory/james-evans)* [He He](https://hhexiy.github.io/)* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/)Week 3: more magic* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)* [Jane Wang](http://www.janexwang.com/) and [Feryal Behbahani](https://feryal.github.io/)* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lomonaco](https://www.vincenzolomonaco.com/)Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map? --- Submit to Airtable
###Code
# @title Video 18: Submission info
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This is Darryl, the Deep Learning Dapper Lion, and he's here to teach you about content submission to Airtable. At the end of each tutorial there will be an Airtable Submission Cell. Run the cell to generate the Airtable submission button and click on it to submit your information to Airtable.

If it is the last tutorial of the day, your button will look like this and take you to the end-of-day survey: otherwise it will look like this:

It is critical that you push the submit button for every tutorial you run. Even if you don't finish the tutorial, still submit! Submitting is the only way we can verify that you attempted each tutorial, which is critical for us to be able to track your progress.

TL;DR: Basic tutorial workflow
1. Work through the tutorial, answering Think! questions and code exercises.
2. At the end of each tutorial (even if the tutorial is incomplete), run the Airtable submission code cell.
3. Push the submission button.
4. If it is the last tutorial of the day, the submission button will also take you to the end-of-day survey on a new page. Complete that and submit it.

Submission FAQs:
1. What if I want to change my answers to previous discussion questions?
> You are free to change and resubmit any of the answers and Think! questions as many times as you like. However, please only run the Airtable submission code and click on the link once you are ready to submit.
2. Okay, but what if I submitted my Airtable anyway and really want to resubmit?
> After making changes, you can re-run the Airtable submission code cell. This will result in a second submission from you for the data. This will make Darryl sad, as it will be more work for him to clean up the data later.
3. HELP! I accidentally ran the code to generate the Airtable submission button before I was ready to submit! What do I do?
> If you run the code to generate the link, anything that happens afterwards will not be captured. Complete the tutorial and make sure to re-run the Airtable submission cell when you are finished, before pressing the submission button.
4. What if I want to work on this on my own later? Should I wait to submit until I'm finished?
> Please submit wherever you are at the end of the day. It's great that you want to keep working on this, but it's important for us to see the places where we tried things that didn't quite work out, so we can fix them for next year.

Finally, we try to keep the Airtable code as hidden as possible, but if you ever see any calls to `atform`, such as `atform.add_event()`, in the coding exercises, just know that is for saving Airtable information only. It will not affect the code that is being run around it in any way, so please do not modify, comment out, or worry about any of those lines of code.

Now, let's try submitting today's course to Airtable by running the next cell and clicking the button when it appears.
###Code
# @title Airtable Submission Link
from IPython import display
display.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link to survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
--- Bonus - 60 years of Machine Learning Research in one Plotby [Hendrik Strobelt](http://hendrik.strobelt.com) (MIT-IBM Watson AI Lab) with support from Benjamin Hoover.In this notebook we visualize a subset* of 3,300 articles retrieved from the AllenAI [S2ORC dataset](https://github.com/allenai/s2orc). We represent each paper by a position that is the output of a dimensionality-reduction method applied to a vector representation of each paper; the vector representation itself is the output of a neural network.*The selection is heavily biased by the keywords and methodology we used to filter. Please see the details section to learn about what we did.
###Code
# @title Import `altair` and load the data
!pip install altair vega_datasets --quiet
import requests
import numpy as np    # used by the year binning below
import pandas as pd   # used by load_data below
import altair as alt  # altair is used to define the data visualizations
# Source data files
# Position data file maps ID to x,y positions
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc.pos_umap_cosine_100_d0.1.json
POS_FILE = 'https://osf.io/qyrfn/download'
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc_clean.csv
# Metadata file maps ID to title, abstract, author,....
META_FILE = 'https://osf.io/vfdu6/download'
# data loading and wrangling
def load_data():
positions = pd.read_json(POS_FILE)
positions[['x', 'y']] = positions['pos'].to_list()
meta = pd.read_csv(META_FILE)
return positions.merge(meta, left_on='id', right_on='paper_id')
# load data
data = load_data()
# @title Define Visualization using Altair
YEAR_PERIOD = "quinquennial" # @param
selection = alt.selection_multi(fields=[YEAR_PERIOD], bind='legend')
data[YEAR_PERIOD] = (data["year"] / 5.0).apply(np.floor) * 5
chart = alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count"]], width=800,
height=800).mark_circle(radius=2, opacity=0.2).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False, clamp=True, domain=list(range(1955,2020,5))),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
# size='citation_count',
# color="decade:O",
opacity=alt.condition(selection, alt.value(.8), alt.value(0.2)),
).add_selection(
selection
).interactive()
###Output
_____no_output_____
###Markdown
Let's look at the visualization. Each dot represents one paper. Close dots mean that the respective papers are more closely related than distant ones. The color indicates the 5-year period in which the paper was published. The dot size indicates the citation count (within the S2ORC corpus) as of July 2020. The view is **interactive** and allows for three main interactions. Try them and play around.

1. Hover over a dot to see a tooltip (title, authors).
2. Select a year in the legend (right) to filter dots.
3. Zoom in/out with scroll -- double click resets the view.
###Code
chart
###Output
_____no_output_____
###Markdown
QuestionsBy playing around, can you find answers to the following questions?

1. Can you find topical clusters? What cluster might occur because of a filtering error?
2. Can you see a temporal trend in the data and clusters?
3. Can you determine when deep learning methods started booming?
4. Can you find the key papers that were written before the DL "winter" that define milestones for a cluster? (tip: look for large dots of a different color)

MethodsHere is what we did:

1. Filter all papers that fulfilled the criteria:
   - are categorized as `Computer Science` or `Mathematics`
   - one of the following keywords appears in the title or abstract: `"machine learning|artificial intelligence|neural network|(machine|computer) vision|perceptron|network architecture| RNN | CNN | LSTM | BLEU | MNIST | CIFAR |reinforcement learning|gradient descent| Imagenet "`
2. Per year, remove all papers that are below the 99th percentile of citation count in that year (see the sketch below).
3. Embed each paper by using abstract+title in the SPECTER model.
4. Project based on the embedding using UMAP.
5. Visualize using Altair.
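As a hedged illustration of step 2, here is a minimal sketch, assuming the papers live in a pandas DataFrame with `year` and `citation_count` columns (as in the data loaded above); the function name is our own.

```python
import pandas as pd

def keep_top_percentile(df: pd.DataFrame, q: float = 0.99) -> pd.DataFrame:
  # per-year citation threshold at the q-th quantile
  thresholds = df.groupby('year')['citation_count'].transform(lambda s: s.quantile(q))
  # keep only papers at or above their year's threshold
  return df[df['citation_count'] >= thresholds]
```

Find Authors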
###Code
# @title Edit the `AUTHOR_FILTER` variable to full text search for authors.
AUTHOR_FILTER = "Rush " # @param space at the end means "word border"
### Don't ignore case when searching...
FLAGS = 0
### uncomment to ignore case
# FLAGS = re.IGNORECASE
## --- FILTER CODE.. make it your own ---
import re
data['issel'] = data['authors'].str.contains(AUTHOR_FILTER, na=False, flags=FLAGS, )
if data['issel'].mean()<0.0000000001:
print('No match found')
## --- FROM HERE ON VIS CODE ---
alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count", "issel"]], width=800,
height=800) \
.mark_circle(stroke="black", strokeOpacity=1).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.StrokeWidth('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[0, 2]), legend=None),
alt.Opacity('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[.2, 1]), legend=None),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
).interactive()
###Output
_____no_output_____
###Markdown
Tutorial 1: PyTorch**Week 1, Day 1: Basics and PyTorch****By Neuromatch Academy**__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording__Content reviewers:__ Deepak Raya, Siwei Bai, Kelson Shilling-Scrivo__Content editors:__ Anoop Kulkarni, Spiros Chavlis__Production editors:__ Arush Tagade, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesWe have a few specific objectives for this tutorial:* Learn about PyTorch and tensors* Tensor Manipulations* Data Loading* GPUs and Cuda Tensors* Train NaiveNet* Get to know your pod* Start thinking about the course as a whole
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- Setup Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy), set global or environment variables, and load in helper functions for things like plotting. In some tutorials, you will notice that we install some dependencies even if they are preinstalled on Google Colab or Kaggle. This happens because we have added automation to our repository through [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions).Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.If you start building your own projects on this code base, we highly recommend looking at these cells in more detail.
###Code
# @title Install dependencies
!pip install pandas --quiet
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
# @title Figure Settings
import ipywidgets as widgets
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# @title Helper Functions
atform = AirtableForm('appn7VdPRseSoMXEG','W1D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/97e94a29-0b3a-4e16-9a8d-f6838a5bd83d')
def checkExercise1(A, B, C, D):
"""
Helper function for checking exercise.
Args:
A: torch.Tensor
B: torch.Tensor
C: torch.Tensor
D: torch.Tensor
Returns:
Nothing.
"""
errors = []
# TODO better errors and error handling
if not torch.equal(A.to(int),torch.ones(20, 21).to(int)):
errors.append(f"Got: {A} \n Expected: {torch.ones(20, 21)} (shape: {torch.ones(20, 21).shape})")
if not np.array_equal( B.numpy(),np.vander([1, 2, 3], 4)):
errors.append("B is not a tensor containing the elements of Z ")
if C.shape != (20, 21):
errors.append("C is not the correct shape ")
if not torch.equal(D, torch.arange(4, 41, step=2)):
errors.append("D does not contain the correct elements")
if errors == []:
print("All correct!")
else:
[print(e) for e in errors]
def timeFun(f, dim, iterations, device='cpu'):
  # time `iterations` calls of f(dim, device) and report the total wall-clock time
  t_total = 0
  for _ in range(iterations):
    start = time.time()
    f(dim, device)
    end = time.time()
    t_total += end - start
  print(f"time taken for {iterations} iterations of {f.__name__}({dim}, {device}): {t_total:.5f}")
###Output
_____no_output_____
###Markdown
**Important note: Google Colab users***Scratch Code Cells*If you want to quickly try out something or take a look at the data you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook.To open a new scratch cell go to *Insert* → *Scratch code cell*. Section 1: Welcome to Neuromatch Deep learning course*Time estimate: ~25mins*
###Code
# @title Video 1: Welcome and History
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing
atform.add_event('Video 1: Welcome and History')
display(out)
###Output
_____no_output_____
###Markdown
This will be an intensive 3-week adventure. We will all learn Deep Learning. In a group. Groups need standards. Read our [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing).
###Code
# @title Video 2: Why DL is cool
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 2: Why DL is cool')
display(out)
###Output
_____no_output_____
###Markdown
**Describe what you hope to get out of this course in about 100 words.** --- Section 2: The Basics of PyTorch*Time estimate: ~2 hours 05 mins* PyTorch is a Python-based scientific computing package targeted at two sets of audiences:- A replacement for NumPy to use the power of GPUs- A deep learning platform that provides significant flexibility and speedAt its core, PyTorch provides a few key features:- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.- An optimized **autograd** engine for automatically computing derivatives.- A clean, modular API for building and deploying **deep learning models**.You can find more information about PyTorch in the appendix. Section 2.1: Creating Tensors
###Code
# @title Video 3: Making Tensors
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 3: Making Tensors')
display(out)
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so. **Construct tensors directly:**---
###Code
# we can construct a tensor directly from some common python iterables,
# such as list and tuple; nested iterables can also be handled, as long
# as the dimensions make sense
# tensor from a list
a = torch.tensor([0, 1, 2])
# tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
###Output
_____no_output_____
###Markdown
**Some common tensor constructors:**---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1, 5)
print(f"Tensor x: {x}")
print(f"Tensor y: {y}")
print(f"Tensor z: {z}")
###Output
_____no_output_____
###Markdown
Notice that ```.empty()``` does not return zeros, but seemingly random small numbers. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence a bit faster if you are looking to just create a tensor. **Creating random tensors and tensors like other tensors:**---
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
print(f"Tensor d: {d}")
###Output
_____no_output_____
###Markdown
*Reproducibility*:

- PyTorch random number generator: you can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA):
```python
import torch
torch.manual_seed(0)
```
- For custom operators, you might need to set the Python seed as well:
```python
import random
random.seed(0)
```
- Random number generators in other libraries:
```python
import numpy as np
np.random.seed(0)
```

Here, we define for you a function called `set_seed` that does the job for you!
###Code
def set_seed(seed=None, seed_torch=True):
"""
Function that controls randomness. NumPy and random modules must be imported.
Args:
seed : Integer
A non-negative integer that defines the random state. Default is `None`.
seed_torch : Boolean
If `True` sets the random seed for pytorch tensors, so pytorch module
must be imported. Default is `True`.
Returns:
Nothing.
"""
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
###Output
_____no_output_____
###Markdown
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
###Code
def simplefun(seed=True, my_seed=None):
if seed:
set_seed(seed=my_seed)
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
print("Tensor a: ", a)
print("Tensor b: ", b)
simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed`
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**---The ```.arange()``` and ```.linspace()``` methods behave as you would expect if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Creating TensorsBelow you will find some incomplete code. Fill in the missing code to construct the specified tensors.We want the tensors: $A:$ 20 by 21 tensor consisting of ones$B:$ a tensor with elements equal to the elements of numpy array $Z$$C:$ a tensor with the same number of elements as $A$ but with values $\sim U(0,1)$$D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive.
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## from the first expression
  raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
```All correct!``` Section 2.2: Operations in PyTorch
###Code
# @title Video 4: Tensor Operators
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 4: Tensor Operators')
display(out)
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
# this only works if c and d already exist
torch.add(a, b, out=c)
#Pointwise Multiplication of a and b
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overridden. The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations
###Code
x = torch.tensor([1, 2, 4, 8])
y = torch.tensor([1, 2, 3, 4])
x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation
###Output
_____no_output_____
###Markdown
**Tensor Methods** Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!) All of these operations should have similar syntax to their numpy equivalents.(Feel free to skip if you already know this!)
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
###Output
_____no_output_____
###Markdown
**Matrix Operations**The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot products, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.T```. Note the lack of brackets for ```Tensor.T``` - it is an attribute, not a method. Coding Exercise 2.2 : Simple tensor operationsBelow are two expressions involving operations on matrices. $$ \textbf{A} = \begin{bmatrix}2 &4 \\5 & 7 \end{bmatrix} \begin{bmatrix} 1 &1 \\2 & 3\end{bmatrix} + \begin{bmatrix}10 & 10 \\ 12 & 1 \end{bmatrix} $$and$$ b = \begin{bmatrix} 3 \\ 5 \\ 7\end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8\end{bmatrix}$$The code block below that computes these expressions using PyTorch is incomplete - fill in the missing lines.
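Before filling in the exercise, here is a quick, self-contained demo of the operators just described; the small tensors are illustrative.

```python
import torch

M = torch.tensor([[1., 2.], [3., 4.]])
N = torch.tensor([[0., 1.], [1., 0.]])
v = torch.tensor([1., 2.])
w = torch.tensor([3., 4.])

print(M @ N)               # matrix multiplication via the overloaded @ operator
print(torch.matmul(M, N))  # the same result with the explicit function
print(torch.dot(v, w))     # dot product of two 1D tensors -> tensor(11.)
print(M.T)                 # transpose as an attribute (same as torch.t(M))
```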
###Code
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
################################################
  ## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
################################################
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = ...
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# A = simple_operations(a1, a2, a3)
# print(A)
# to_remove solution
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = a1 @ a2 + a3
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
A = simple_operations(a1, a2, a3)
print(A)
###Output
_____no_output_____
###Markdown
```tensor([[20, 24], [31, 27]])```
###Code
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
###############################################
  ## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
# Use torch.dot() to compute the dot product of two tensors
product = ...
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# b = dot_product(b1, b2)
# print(b)
# to_remove solution
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
# Use torch.dot() to compute the dot product of two tensors
product = torch.dot(b1, b2)
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
b = dot_product(b1, b2)
print(b)
###Output
_____no_output_____
###Markdown
```tensor(82)``` Section 2.3: Manipulating Tensors in PyTorch
###Code
# @title Video 5: Tensor Indexing
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 5: Tensor Indexing')
display(out)
###Output
_____no_output_____
###Markdown
**Indexing**Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0 and ranges are specified to include the first but before the last element. We can access elements according to their relative position to the end of the list by using negative indices. Indexing is also referred to as slicing.For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as numpy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
###Output
_____no_output_____
###Markdown
**Flatten and reshape**There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")
# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")
# and back to 2D
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
###Output
_____no_output_____
###Markdown
You will also see the ```.view()``` method used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()```. The documentation can be found in the appendix; a small illustration of the difference follows below.
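```.view()``` requires the requested shape to be compatible with the tensor's memory layout, while ```.reshape()``` will copy the data when that is not possible. A minimal sketch:

```python
a = torch.arange(6).reshape(2, 3)
b = a.T            # the transpose is not contiguous in memory
# b.view(6)        # would raise a RuntimeError here
c = b.reshape(6)   # works: .reshape() copies when a view is impossible
print(c)           # tensor([0, 3, 1, 4, 2, 5])
```

**Squeezing tensors**When processing batches of data, you will quite often be left with singleton dimensions. e.g. [1,10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there...In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.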
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print(f"x[0]: {x[0]}")
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, x[0] gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")
# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")
# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
###Output
_____no_output_____
###Markdown
**Permutation**Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension i.e. [48x64x3]. To get around this we can use ```.permute()```
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
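# .transpose() (mentioned in the note below) swaps exactly two dimensions;
# a quick preview on a fresh tensor:
y = torch.rand(3, 48, 64)
print(y.transpose(0, 2).shape)  # dims 0 and 2 swapped -> torch.Size([64, 48, 3])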
###Output
_____no_output_____
###Markdown
You may also see ```.transpose()``` used. This works in a similar way to permute, but can only swap two dimensions at once. **Concatenation** In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor's axis-0 length (6) is the sum of the two input tensors' axis-0 lengths (3 + 3), while the second output tensor's axis-1 length (8) is the sum of the two input tensors' axis-1 lengths (4 + 4).
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
#concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by colums: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
###Markdown
**Conversion to Other Python Objects**Converting a tensor to a NumPy array, or vice versa, is easy, but watch the memory semantics: calling ```.numpy()``` on a CPU tensor returns an array that *shares* the underlying memory with the tensor, while ```torch.tensor()``` always *copies* the data (```torch.from_numpy()``` gives you the shared-memory behaviour in the other direction). When converting to a numpy array, the information being tracked by the tensor will be lost, i.e., the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
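A minimal sketch of these memory semantics (all names are local to the example):

```python
x = torch.ones(3)
y = x.numpy()        # y shares memory with the CPU tensor x
x.add_(1)            # an in-place update of x...
print(y)             # ...is visible through y: [2. 2. 2.]

z = torch.tensor(y)  # torch.tensor() copies the data
y[0] = 0             # mutating y afterwards...
print(z)             # ...does not affect z: tensor([2., 2., 2.])
```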
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the item function or Python’s built-in functions.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.3: Manipulating TensorsUsing a combination of the methods discussed above, complete the functions below. **Function A** This function takes in two 2D tensors $A$ and $B$ and returns the column sum of $A$ multiplied by the sum of all the elements of $B$, i.e., a scalar, e.g.,:$ A = \begin{bmatrix}1 & 1 \\1 & 1 \end{bmatrix} \,$and$ B = \begin{bmatrix}1 & 2 & 3\\1 & 2 & 3 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix} 2 & 2 \\\end{bmatrix} \cdot 12 = \begin{bmatrix}24 & 24\\\end{bmatrix}$**Function B** This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.,:$ C = \begin{bmatrix}2 & 3 \\-1 & 10 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix}0 & 2 \\1 & 3 \\2 & -1 \\3 & 10\end{bmatrix}$**Hint:** pay close attention to singleton dimensions**Function C**This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $D$-shaped $E$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors, e.g.,:$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix} \,$and $ E = \begin{bmatrix}2 & 3 & 0 & 2 \\\end{bmatrix} \, $so$ \, Out = \begin{bmatrix}3 & 2 \\-1 & 5 \end{bmatrix}$$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$and$ \, E = \begin{bmatrix}2 & 3 & 0 \\\end{bmatrix} \,$so$ \, Out = \begin{bmatrix}1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix}$**Hint:** `torch.numel()` is an easy way of finding the number of elements in a tensor
###Code
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
  `my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
################################################
## TODO for students: complete functionA
raise NotImplementedError("Student exercise: complete function A")
################################################
# TODO multiplication the sum of the tensors
output = ...
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function B")
################################################
# TODO flatten the tensor `my_tensor`
my_tensor = ...
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
  elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function C")
################################################
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if ...:
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
my_tensor1 = ...
my_tensor2 = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
# to_remove solution
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
  `my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
# TODO multiplication the sum of the tensors
output = my_tensor1.sum(axis=0) * my_tensor2.sum()
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO flatten the tensor `my_tensor`
my_tensor = my_tensor.flatten()
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = torch.arange(0, len(my_tensor))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), my_tensor.unsqueeze(1)], axis=1)
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
  elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if torch.numel(my_tensor1) == torch.numel(my_tensor2):
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = my_tensor2.reshape(my_tensor1.shape)
# TODO sum the two tensors
output = my_tensor1 + my_tensor2
else:
# TODO flatten both tensors
my_tensor1 = my_tensor1.reshape(1, -1)
my_tensor2 = my_tensor2.reshape(1, -1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([my_tensor1, my_tensor2], axis=1).squeeze()
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
###Output
_____no_output_____
###Markdown
```tensor([24, 24])tensor([[ 0, 2], [ 1, 3], [ 2, -1], [ 3, 10]])tensor([[ 3, 2], [-1, 5]])tensor([ 1, -1, -1, 3, 2, 3, 0])``` Section 2.4: GPUs
###Code
# @title Video 6: GPU vs CPU
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 6: GPU vs CPU')
display(out)
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
When using Colab notebooks, you will not have access to a GPU by default. In order to start using GPUs we need to request one. We can do this by going to the Runtime tab at the top of the page. By following Runtime -> Change runtime type and selecting "GPU" from the Hardware Accelerator dropdown list, we can start playing with sending tensors to GPUs. Once you have done this, your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell. (For more information on the GPU usage policy, see the appendix.) **Now we have a GPU** The cell below should return True.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction and allows us to launch CUDA kernels using pure Python. In short, we get the power of parallelizing our tensor computations on GPUs, whilst only writing (relatively) simple Python!

Here, we define the function `set_device`, which returns the device used in the notebook, i.e., `cpu` or `cuda`. Unless otherwise specified, we use this function at the top of every tutorial, and we store the device in a variable such as

```python
DEVICE = set_device()
```

Let's define the function using the PyTorch package `torch.cuda`, which is lazily initialized, so we can always import it, and use `is_available()` to determine if our system supports CUDA.
###Code
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
  if device != "cuda":
    print("GPU is not enabled in this notebook. \n"
          "If you want to enable it, in the menu select `Runtime` ->\n"
          "`Change runtime type`, then choose `GPU` from the `Hardware accelerator` dropdown menu")
  else:
    print("GPU is enabled in this notebook. \n"
          "If you want to disable it, in the menu select `Runtime` ->\n"
          "`Change runtime type`, then choose `None` from the `Hardware accelerator` dropdown menu")
return device
###Output
_____no_output_____
###Markdown
Let's make some CUDA tensors!
###Code
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between CPU tensors and CUDA tensors** Note that the type of the tensor changed after calling `.to()`. What happens if we try to perform operations on tensors that live on different devices?
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
# Uncomment the following line and run this cell
# z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the `.to()` method as before, or the `.cpu()` and `.cuda()` methods. Note that using `.cuda()` will throw an error if CUDA is not enabled on your machine. Generally, in this course all deep learning is done on the GPU and any other computation is done on the CPU, so sometimes we have to pass things back and forth, as you'll see below.
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)
# moving to cpu
x = x.to("cpu") # alternatively, you can use x = x.cpu()
print(x + y)
# moving to gpu
y = y.to(DEVICE) # alternatively, you can use y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.4: Just how much faster are GPUs? Below is a simple function `simpleFun`. Complete this function, such that it performs the operations: - elementwise multiplication - matrix multiplication. The operations should be able to be performed on either the CPU or GPU, as specified by the parameter `device`. We will use the helper function `timeFun(f, dim, iterations, device)`.
###Code
dim = 10000
iterations = 1
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
###############################################
## TODO for students: recreate the function, but
## ensure all computations happens on the `device`
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
###############################################
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
x = ...
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
y = ...
# 2D tensor filled with the scalar value 2, dim x dim
z = ...
# elementwise multiplication of x and y
a = ...
# matrix multiplication of x and y
b = ...
del x
del y
del z
del a
del b
## TODO: Implement the function above and uncomment the following lines to test your code
# timeFun(f=simpleFun, dim=dim, iterations=iterations)
# timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
# to_remove solution
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
x = torch.rand(dim, dim).to(device)
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
y = torch.rand_like(x).to(device)
# 2D tensor filled with the scalar value 2, dim x dim
z = 2*torch.ones(dim, dim).to(device)
# elementwise multiplication of x and y
a = x * y
# matrix multiplication of x and y
  b = x @ y
del x
del y
del z
del a
del b
## TODO: Implement the function above and uncomment the following lines to test your code
timeFun(f=simpleFun, dim=dim, iterations=iterations)
timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
###Output
_____no_output_____
###Markdown
Sample output (depends on your hardware)

```
time taken for 1 iterations of simpleFun(10000, cpu): 23.74070
time taken for 1 iterations of simpleFun(10000, cuda): 0.87535
```

**Discuss!**

Try to reduce the dimensions of the tensors and increase the iterations. You can get to a point where the CPU-only function is faster than the GPU function. Why might this be? A benchmark sketch you can adapt is given below.
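One way to probe this crossover is the minimal benchmark sketched below (our addition, not part of the exercise; `bench` is a name we introduce). For small tensors, the fixed overhead of launching CUDA kernels and synchronizing with the GPU dominates the arithmetic, so the CPU wins; as `dim` grows, the GPU's parallelism takes over.

```python
import time
import torch

def bench(dim, device, iterations=100):
  """Time elementwise and matrix multiplication of dim x dim tensors."""
  x = torch.rand(dim, dim, device=device)
  y = torch.rand(dim, dim, device=device)
  if device == "cuda":
    torch.cuda.synchronize()    # finish setup before starting the clock
  start = time.time()
  for _ in range(iterations):
    a = x * y                   # elementwise product
    b = x @ y                   # matrix product
  if device == "cuda":
    torch.cuda.synchronize()    # wait for queued GPU kernels to finish
  return time.time() - start

for dim in (10, 100, 1000):
  report = f"dim={dim}: cpu {bench(dim, 'cpu'):.4f}s"
  if torch.cuda.is_available():
    report += f" | cuda {bench(dim, 'cuda'):.4f}s"
  print(report)
```

 Section 2.5: Datasets and Dataloaders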
###Code
# @title Video 7: Getting Data
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 7: Getting Data')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
###Code
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
###Output
_____no_output_____
###Markdown
**Datasets**The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print(f"Number of samples: {len(cifar10_data)}")
print(f"Class names: {cifar10_data.classes}")
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
# Choose a random sample
random.seed(2021)
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]
print(f"Label: {cifar10_data.classes[label]}")
print(f"Image size: {image.shape}")
###Output
_____no_output_____
###Markdown
Color images are modeled as 3-dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimension is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W. Coding Exercise 2.5: Display an image from the dataset Let's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - $H \times W \times C$. You need to reorder the dimensions of the tensor using the `permute` method. `Tensor.permute(*dims)` rearranges the dimensions of the original tensor according to the desired ordering and returns a new multidimensional tensor; the number of elements remains the same as in the original. **Code hint:**

```python
# create a tensor of size 2 x 4
input_var = torch.randn(2, 4)
# print its size and the tensor
print(input_var.size())
print(input_var)
# dimensions permuted
input_var = input_var.permute(1, 0)
# print its size and the permuted tensor
print(input_var.size())
print(input_var)
```
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
# plt.show()
# to_remove solution
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1, 2, 0))
plt.show()
#@title Video 8: Train and Test
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 8: Train and Test')
display(out)
###Output
_____no_output_____
###Markdown
**Training and Test Datasets** When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, we will not use the two datasets separately today; this topic will be addressed in the coming days.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
# @title Video 9: Data Augmentation - Transformations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 9: Data Augmentation - Transformations')
display(out)
###Output
_____no_output_____
###Markdown
**Dataloader**Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with batch size 64 and shuffled samples
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
*Reproducibility:* `DataLoader` will reseed workers following the "Randomness in multi-process data loading" algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:

```python
def seed_worker(worker_id):
    worker_seed = torch.initial_seed() % 2**32
    numpy.random.seed(worker_seed)
    random.seed(worker_seed)

g_seed = torch.Generator()
g_seed.manual_seed(my_seed)

DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    worker_init_fn=seed_worker,
    generator=g_seed
    )
```

**Note:** For the `seed_worker` to have an effect, `num_workers` should be 2 or more. We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then query the next batch using the function `next`. We will see that we have a 4D tensor. This is because we have 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
plt.show()
###Output
_____no_output_____
###Markdown
**Transformations** Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation, etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform. Check out the [PyTorch documentation](https://pytorch.org/vision/stable/transforms.html) for details. Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale images The goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility.
###Code
def my_data_load():
###############################################
## TODO for students: load the CIFAR10 data,
## but as grayscale images and not as RGB colored.
raise NotImplementedError("Student exercise: fill in the missing code to load the data")
###############################################
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(...,
transform=...)
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# data = my_data_load()
# to_remove solution
def my_data_load():
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(root="data", download=True,
transform=Compose([ToTensor(), Grayscale()]))
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
data = my_data_load()
###Output
_____no_output_____
###Markdown
--- Section 3: Neural Networks *Time estimate: ~1 hour 30 mins (excluding video)* Now it's time for you to create your first neural network using PyTorch. This section will walk you through the process of: - Creating a simple neural network model - Training the network - Visualizing the results of the network - Tweaking the network
###Code
# @title Video 10: CSV Files
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 10: CSV Files')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Data LoadingFirst we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
# @title Generate sample data
# @markdown we used `scikit-learn` module
from sklearn.datasets import make_moons
# Create a dataset of 256 points with a little noise
X, y = make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")
# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
plt.show()
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Initialize the device variable
DEVICE = set_device()
# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)
# Upload the tensor to the device
X = X.to(DEVICE)
print(f"Size X:{X.shape}")
# Convert the labels to a long integer tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(DEVICE)
print(f"Size y:{y.shape}")
###Output
_____no_output_____
###Markdown
Section 3.2: Create a Simple Neural Network
###Code
# @title Video 11: Generating the Neural Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 11: Generating the Neural Network')
display(out)
###Output
_____no_output_____
###Markdown
For this example we want to have a simple neural network consisting of 3 layers:
- 1 input layer of size 2 (our points have 2 coordinates)
- 1 hidden layer of size 16 (you can play with different numbers here)
- 1 output layer of size 2 (we want to have the scores for the two classes)

During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the coming days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.

**Programming the Network**

PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:

`__init__` In the `__init__` method you need to define the structure of your network. Here you specify which layers the network consists of, which activation functions will be used, etc.

`forward` All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it.

`predict` This is not an obligatory method of a neural network module, but it is good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score.

`train` This is also not an obligatory method, but it is good practice to have one. The method will be used to train the network parameters and will be implemented later in the notebook.

> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net(x)` does the same as `net.forward(x)`.
###Code
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
        nn.ReLU(),  # Activation function: ReLU is a widely used non-linearity that is cheap
                    # to compute; it returns 0 for any negative input and passes any
                    # positive value x through unchanged.
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Choose the most likely label predicted by the network
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
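  # (Note: naming this method `train` shadows nn.Module's built-in train(),
  #  which toggles training mode. That is fine for this demo, but in general
  #  prefer a different name for a custom training loop.)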
def train(self, X, y):
pass
###Output
_____no_output_____
###Markdown
**Check that your network works**Create an instance of your model and visualize it
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(DEVICE)
# Print the structure of the network
print(model)
###Output
_____no_output_____
###Markdown
Coding Exercise 3.2: Classify some samplesNow let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. The goal here is just to get some experience with the data structures that are passed to the forward and predict methods and their results.
###Code
## Get the samples
# X_samples = ...
# print("Sample input:\n", X_samples)
## Do a forward pass of the network
# output = ...
# print("\nNetwork output:\n", output)
## Predict the label of each point
# y_predicted = ...
# print("\nPredicted labels:\n", y_predicted)
# to_remove solution
## Get the samples
X_samples = X[0:5]
print("Sample input:\n", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("\nNetwork output:\n", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("\nPredicted labels:\n", y_predicted)
###Output
_____no_output_____
###Markdown
```
Sample input:
 tensor([[ 0.9066,  0.5052],
        [-0.2024,  1.1226],
        [ 1.0685,  0.2809],
        [ 0.6720,  0.5097],
        [ 0.8548,  0.5122]], device='cuda:0')

Network output:
 tensor([[ 0.1543, -0.8018],
        [ 2.2077, -2.9859],
        [-0.5745, -0.0195],
        [ 0.1924, -0.8367],
        [ 0.1818, -0.8301]], device='cuda:0', grad_fn=<AddmmBackward>)

Predicted labels:
 tensor([0, 0, 1, 0, 0], device='cuda:0')
```
 Section 3.3: Train Your Neural Network
###Code
# @title Video 12: Train the Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 12: Train the Network')
display(out)
###Output
_____no_output_____
###Markdown
Now it is time to train your network on the dataset. Don't worry if you don't fully understand everything yet - we will cover training in much more detail in the coming days. For now, the goal is just to see your network in action! You would usually implement the `train` method directly inside your `NaiveNet` class. Here, we implement it as a function outside of the class in order to have it in a separate cell.
###Code
# @title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
  # Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
# We'll use the whole dataset during the training instead of using batches
    # in order to keep the code simple for now.
y_logits = model.forward(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(model, X, y, DEVICE)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Create a new network instance a train it
model = NaiveNet().to(DEVICE)
losses = train(model, X, y)
###Output
_____no_output_____
###Markdown
**Plot the loss during training**Plot the loss during the training to see how it reduces and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
# @title Visualize the training process
# @markdown ### Execute this cell!
!pip install imageio --quiet
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(10):
filename = "frames/0"+str(i)+"000.png"
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
# @title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 13: Play with it')
display(out)
###Output
_____no_output_____
###Markdown
Exercise 3.3: Tweak your Network You can now play around with the network a little bit to get a feeling for what the different parameters are doing. Here are some ideas for what you could try: - Increase or decrease the number of epochs for training - Increase or decrease the size of the hidden layer - Add one additional hidden layer. Can you get the network to fit the data better? A configurable sketch to start from is given below.
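As a starting point, below is a hedged sketch of a configurable variant of `NaiveNet`; `TweakableNet` and its `hidden_sizes` parameter are names we introduce for this exercise, not part of the tutorial's API. It reuses the `train` function, `DEVICE`, `X`, and `y` defined above.

```python
class TweakableNet(nn.Module):
  def __init__(self, hidden_sizes=(16,)):
    super().__init__()
    layers = []
    in_size = 2                                # our points have 2 coordinates
    for h in hidden_sizes:
      layers += [nn.Linear(in_size, h), nn.ReLU()]
      in_size = h
    layers.append(nn.Linear(in_size, 2))       # scores for the two classes
    self.layers = nn.Sequential(*layers)

  def forward(self, x):
    return self.layers(x)

  def predict(self, x):
    return torch.argmax(self.forward(x), 1)

# e.g., a wider first hidden layer plus one additional hidden layer:
model = TweakableNet(hidden_sizes=(32, 16)).to(DEVICE)
losses = train(model, X, y)
```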
###Code
# @title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 14: XOR Widget')
display(out)
###Output
_____no_output_____
###Markdown
The exclusive OR (XOR) logical operation gives a true (`1`) output when the number of true inputs is odd. That is, a true output results if one, and only one, of the inputs to the gate is true. If both inputs are false (`0`) or both are true, a false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false. In the case of two inputs ($X$ and $Y$) the following truth table applies:

\begin{array}{ccc}
X & Y & \text{XOR} \\
\hline
0 & 0 & 0 \\
0 & 1 & 1 \\
1 & 0 & 1 \\
1 & 1 & 0 \\
\end{array}

Here, with `0` we denote `False`, and with `1` we denote `True` in boolean terms. Interactive Demo 3.3: Solving XOR Here we use a famous open-source visualization widget developed by the TensorFlow team, available [here](https://github.com/tensorflow/playground). * Play with the widget and observe that you cannot solve the continuous XOR dataset. * Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly. For the second part, you should set the weights by clicking on the connections and either typing the value or using the up and down keys to change it by one increment. You can do the same for the biases by clicking on the tiny square to each neuron's bottom left. Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is:

\begin{equation}
y = f(x_1) + f(x_2) - f(x_1 + x_2)
\end{equation}

Try to set the weights and biases to implement this function after you have played enough :)
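Before (or after) playing with the widget, you can sanity-check this solution numerically. The sketch below is ours, assuming the XOR inputs are encoded as $-1$/$+1$, as in the continuous XOR dataset where the sign of each coordinate determines the class:

```python
import torch

def f(x):
  return torch.relu(x)

# The four input combinations, encoded as -1/+1
X_xor = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]])
y_xor = f(X_xor[:, 0]) + f(X_xor[:, 1]) - f(X_xor[:, 0] + X_xor[:, 1])
print(y_xor)  # tensor([0., 1., 1., 0.]) -- positive exactly when the signs differ
```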
###Code
# @markdown ###Play with the parameters to solve XOR
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor®Dataset=reg-plane&learningRate=0.03®ularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
# @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
###Output
_____no_output_____
###Markdown
--- Section 4: Ethics And Course Info
###Code
# @title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 17: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Meet our lecturers:

Week 1: the building blocks
* [Konrad Kording](https://kordinglab.com)
* [Andrew Saxe](https://www.saxelab.org/)
* [Surya Ganguli](https://ganguli-gang.stanford.edu/)
* [Ioannis Mitliagkas](http://mitliagkas.github.io/)
* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/)

Week 2: making things work
* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)
* [Alexander Ecker](https://eckerlab.org/)
* [James Evans](https://sociology.uchicago.edu/directory/james-evans)
* [He He](https://hhexiy.github.io/)
* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/)

Week 3: more magic
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Jane Wang](http://www.janexwang.com/) and [Feryal Behbahani](https://feryal.github.io/)
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lomonaco](https://www.vincenzolomonaco.com/)

Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map? --- Submit to Airtable
###Code
# @title Video 18: Submission info
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This is Darryl, the Deep Learning Dapper Lion, and he's here to teach you about content submission to Airtable. At the end of each tutorial there will be an Airtable Submission Cell. Run the cell to generate the Airtable submission button and click on it to submit your information to Airtable. If it is the last tutorial of the day, the button will also take you to the end-of-day survey. It is critical that you push the submit button for every tutorial you run. Even if you don't finish the tutorial, still submit! Submitting is the only way we can verify that you attempted each tutorial, which is critical for us to be able to track your progress.

TL;DR: Basic tutorial workflow

1. Work through the tutorial, answering Think! questions and code exercises.
2. At the end of each tutorial (even if incomplete), run the Airtable submission code cell.
3. Push the submission button.
4. If it is the last tutorial of the day, the submission button will also take you to the end-of-day survey on a new page; complete and submit that as well.

Submission FAQs:

1. What if I want to change my answers to previous discussion questions? > You are free to change and resubmit any of the answers and Think! questions as many times as you like. However, please only run the Airtable submission code and click on the link once you are ready to submit.

2. Okay, but what if I submitted my Airtable anyway and really want to resubmit? > After making changes, you can re-run the Airtable submission code cell. This will result in a second submission from you for the data. This will make Darryl sad, as it will be more work for him to clean up the data later.

3. HELP! I accidentally ran the code to generate the Airtable submission button before I was ready to submit! What do I do? > If you run the code to generate the link, anything that happens afterwards will not be captured. Complete the tutorial and make sure to re-run the Airtable submission cell when you are finished, before pressing the submission button.

4. What if I want to work on this on my own later, should I wait to submit until I'm finished? > Please submit wherever you are at the end of the day. It's great that you want to keep working on this, but it's important for us to see the places where we tried things that didn't quite work out, so we can fix them for next year.

Finally, we try to keep the Airtable code as hidden as possible, but if you ever see any calls to `atform`, such as `atform.add_event()`, in the coding exercises, just know that they are for saving Airtable information only. They will not affect the code being run around them in any way, so please do not modify, comment out, or worry about any of those lines of code.

Now, let's try submitting today's course to Airtable by running the next cell and clicking the button when it appears.
###Code
# @title Airtable Submission Link
from IPython import display as IPyDisplay
IPyDisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link to survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
--- Bonus - 60 years of Machine Learning Research in one Plot by [Hendrik Strobelt](http://hendrik.strobelt.com) (MIT-IBM Watson AI Lab) with support from Benjamin Hoover. In this notebook we visualize a subset* of 3,300 articles retrieved from the AllenAI [S2ORC dataset](https://github.com/allenai/s2orc). We represent each paper by a position that is the output of a dimensionality reduction method applied to a vector representation of each paper. The vector representation is the output of a neural network. *The selection is heavily biased by the keywords and methodology we used to filter. Please see the Methods section below to learn about what we did.
###Code
# @title Import `altair` and load the data
!pip install altair vega_datasets --quiet
import requests
import altair as alt # altair is defining data visualizations
# Source data files
# Position data file maps ID to x,y positions
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc.pos_umap_cosine_100_d0.1.json
POS_FILE = 'https://osf.io/qyrfn/download'
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc_clean.csv
# Metadata file maps ID to title, abstract, author,....
META_FILE = 'https://osf.io/vfdu6/download'
# data loading and wrangling
def load_data():
positions = pd.read_json(POS_FILE)
positions[['x', 'y']] = positions['pos'].to_list()
meta = pd.read_csv(META_FILE)
return positions.merge(meta, left_on='id', right_on='paper_id')
# load data
data = load_data()
# @title Define Visualization using Altair
YEAR_PERIOD = "quinquennial" # @param
selection = alt.selection_multi(fields=[YEAR_PERIOD], bind='legend')
data[YEAR_PERIOD] = (data["year"] / 5.0).apply(np.floor) * 5
chart = alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count"]], width=800,
height=800).mark_circle(radius=2, opacity=0.2).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False, clamp=True, domain=list(range(1955,2020,5))),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
# size='citation_count',
# color="decade:O",
opacity=alt.condition(selection, alt.value(.8), alt.value(0.2)),
).add_selection(
selection
).interactive()
###Output
_____no_output_____
###Markdown
Let's look at the visualization. Each dot represents one paper. Nearby dots mean that the respective papers are more closely related than distant ones. The color indicates the 5-year period in which the paper was published. The dot size indicates the citation count (within the S2ORC corpus) as of July 2020. The view is **interactive** and allows for three main interactions. Try them and play around. 1. Hover over a dot to see a tooltip (title, author). 2. Select a year in the legend (right) to filter dots. 3. Zoom in/out with scroll -- double click resets the view.
###Code
chart
###Output
_____no_output_____
###Markdown
Questions

By playing around, can you find some answers to the following questions?

1. Can you find topical clusters? What cluster might occur because of a filtering error?
2. Can you see a temporal trend in the data and clusters?
3. Can you determine when deep learning methods started booming?
4. Can you find the key papers that were written before the DL "winter" that define milestones for a cluster? (Tip: look for large dots of a different color.)

Methods

Here is what we did:

1. Filter for all papers that fulfilled the criteria:
 - categorized as `Computer Science` or `Mathematics`
 - one of the following keywords appearing in title or abstract: `"machine learning|artificial intelligence|neural network|(machine|computer) vision|perceptron|network architecture| RNN | CNN | LSTM | BLEU | MNIST | CIFAR |reinforcement learning|gradient descent| Imagenet "`
2. Per year, remove all papers that are below the 99th percentile of citation count in that year.
3. Embed each paper (abstract + title) with the SPECTER model.
4. Project the embeddings to 2D positions using UMAP.
5. Visualize using Altair.

A sketch of steps 3-4 is given after this list.
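The sketch below is a hedged illustration of steps 3-4, not the authors' actual pipeline code; it assumes the `transformers` and `umap-learn` packages are installed, and the three paper strings are hypothetical placeholders.

```python
from transformers import AutoTokenizer, AutoModel
from umap import UMAP

# SPECTER is a document-level embedding model for scientific papers
tokenizer = AutoTokenizer.from_pretrained("allenai/specter")
model = AutoModel.from_pretrained("allenai/specter")

# Each paper is represented as "title [SEP] abstract" (placeholder texts)
papers = [
    "Paper A title" + tokenizer.sep_token + "Abstract of paper A ...",
    "Paper B title" + tokenizer.sep_token + "Abstract of paper B ...",
    "Paper C title" + tokenizer.sep_token + "Abstract of paper C ...",
]
inputs = tokenizer(papers, padding=True, truncation=True,
                   max_length=512, return_tensors="pt")
embeddings = model(**inputs).last_hidden_state[:, 0, :]  # [CLS] embeddings

# Project the high-dimensional embeddings down to 2D plotting positions
positions = UMAP(n_components=2, metric="cosine",
                 n_neighbors=2).fit_transform(embeddings.detach().numpy())
print(positions.shape)  # (3, 2)
```

 Find Authors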
###Code
# @title Edit the `AUTHOR_FILTER` variable to full text search for authors.
AUTHOR_FILTER = "Rush " # @param space at the end means "word border"
### Don't ignore case when searching...
FLAGS = 0
### uncomment to ignore case
# FLAGS = re.IGNORECASE
## --- FILTER CODE.. make it your own ---
import re
data['issel'] = data['authors'].str.contains(AUTHOR_FILTER, na=False, flags=FLAGS, )
if data['issel'].mean()<0.0000000001:
print('No match found')
## --- FROM HERE ON VIS CODE ---
alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count", "issel"]], width=800,
height=800) \
.mark_circle(stroke="black", strokeOpacity=1).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.StrokeWidth('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[0, 2]), legend=None),
alt.Opacity('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[.2, 1]), legend=None),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
).interactive()
###Output
_____no_output_____
###Markdown
Neuromatch Academy: Week 1, Day 1, Tutorial 1 Pytorch__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent__Content reviewers:__ Kelson Shilling-Scrivo, Deepak Raya__Content editors:__ Anoop Kulkarni__Production editors:__ Arush Tagade, Spiros Chavlis --- Tutorial Objectives We have a few specific objectives for this tutorial:* Learn about PyTorch and tensors* Tensor manipulations* Data loading* GPUs and CUDA tensors* Train NaiveNet* Get to know your pod* Start thinking about the course as a whole --- Setup Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy), set global or environment variables, and load in helper functions for things like plotting. Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells. If you build your own projects on this code base, we highly recommend looking at these cells in more detail.
###Code
#@title Imports
import torch
import numpy as np
from torch import nn
import matplotlib.pyplot as plt
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
import time
###Output
_____no_output_____
###Markdown
---
###Code
#@title Helper Functions
def checkExercise1(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, D: torch.Tensor):
  errors = []
  # TODO better errors
  if not torch.equal(A, torch.ones(20, 21)):
    errors.append("A is not a 20 by 21 tensor of ones ")
  if not np.array_equal(B.numpy(), np.vander([1, 2, 3], 4)):
    errors.append("B is not a tensor containing the elements of Z ")
  if C.shape != (20, 21):
    errors.append("C is not the correct shape ")
  if not torch.equal(D, torch.arange(4, 41, step=2)):
    errors.append("D does not contain the correct elements")
  if errors == []:
    print("All correct!")
  else:
    print(errors)
def timeFun(f, dim, iterations, device='cpu'):
  t_total = 0
  for _ in range(iterations):
    start = time.time()
    f(dim, device)
    end = time.time()
    t_total += end - start
  print(f"time taken for {iterations} iterations of {f.__name__}({dim}, {device}): {t_total:.5f}")
###Output
_____no_output_____
###Markdown
Section 1: Welcome to Neuromatch Deep learning course
###Code
#@title Video 1.1: Welcome and History
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="ca21SNqt78I", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
This will be an intensive 3-week adventure. We will all learn deep learning. In a group. Groups need standards. Read our [code of conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing). Exercise: describe what you hope to get out of this course in about 100 words.
###Code
#@title Video 1.2: Syllabus
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="cDvAqG_hAvQ", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Meet our lecturers:

Week 1: the building blocks
* [Konrad Kording](https://kordinglab.com)
* [Andrew Saxe](https://www.saxelab.org/)
* [Surya Ganguli](https://ganguli-gang.stanford.edu/)
* [Ioannis Mitliagkas](http://mitliagkas.github.io/)
* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/)

Week 2: making things work
* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)
* [Alexander Ecker](https://eckerlab.org/)
* [James Evans](https://sociology.uchicago.edu/directory/james-evans)
* [He He](https://hhexiy.github.io/)
* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/)

Week 3: more magic
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Jane Wang](http://www.janexwang.com/)
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lomonaco](https://www.vincenzolomonaco.com/)

Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map?

Section 2: The Basics of PyTorch

PyTorch is a Python-based scientific computing package targeted at two sets of audiences:
- A replacement for NumPy to use the power of GPUs
- A deep learning platform that provides significant flexibility and speed

At its core, PyTorch provides a few key features:
- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to a [NumPy array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.
- An optimized **autograd** engine for automatically computing derivatives.
- A clean, modular API for building and deploying **deep learning models**.

You can find more information about PyTorch in the appendix; a tiny autograd example is sketched below.
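As a quick taste of the autograd engine (a minimal sketch we add here, assuming the setup cell has been run so that `torch` is imported):

```python
x = torch.tensor(2.0, requires_grad=True)  # ask autograd to track operations on x
y = x**2 + 3*x                             # y = x^2 + 3x
y.backward()                               # compute dy/dx automatically
print(x.grad)                              # tensor(7.), since dy/dx = 2x + 3 = 7 at x = 2
```

 Section 2.1: Creating Tensors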
###Code
#@title Video 2.1: Making Tensors
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="jGKd_4tPGrw", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors. **Construct tensors directly:**---
###Code
# we can construct a tensor directly from some common python iterables,
# such as list and tuple nested iterables can also be handled as long as the
# dimensions make sense
# tensor from a list
a = torch.tensor([0,1,2])
#tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print("Tensor a:", a)
print("Tensor b:", b)
print("Tensor c:", c)
###Output
_____no_output_____
###Markdown
**Some common tensor constructors:**---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1,5)
print("Tensor x:", x)
print("Tensor y:", y)
print("Tensor z:", z)
###Output
_____no_output_____
###Markdown
Notice that ```.empty()``` does not return zeros, but seemingly random small numbers. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence faster if you are looking to just create a tensor. **Creating random tensors and tensors like other tensors:**---
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print("Tensor a: ", a)
print("Tensor b: ", b)
print("Tensor c: ", c)
print("Tensor d: ", d)
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**---The ```.arange()``` and ```.linspace()``` constructors behave as you would expect them to if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print("Tensor a: ", a)
print("Numpy array b: ", b)
print("Tensor c: ", c)
print("Numpy array d: ", d)
###Output
_____no_output_____
###Markdown
Exercise 1: Creating TensorsBelow you will find some incomplete code. Fill in the missing code to construct the specified tensors.We want the tensors: $A:$ 20 by 21 tensor consisting of ones$B:$ a tensor with elements equal to the elements of numpy array $Z$$C:$ a tensor with the same number of elements as $A$ but with values $\sim U(0,1)$$D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive.
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## from the first expression
raise NotImplementedError("Student exercise: complete the tensor constructions A, B, C, and D")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## from the first expression
#raise NotImplementedError("Student exercise: complete the tensor constructions A, B, C, and D")
#################################################
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
Section 2.2: Operations in PyTorch**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
#@title Video 2.2: Tensor Operators
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="R1R8VoYXBVA", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
torch.add(a, b, out=c)
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overridden.The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations
###Code
x = torch.tensor([1.0, 2, 4, 8])
y = torch.tensor([2, 2, 2, 2])
x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation
###Output
_____no_output_____
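###Markdown
These elementwise operators also broadcast across compatible shapes, just as in numpy (a minimal sketch; broadcasting itself is not covered in the text above):
###Code
# shapes (3, 1) and (1, 4) broadcast to (3, 4)
a = torch.arange(3).reshape(3, 1)
b = torch.arange(4).reshape(1, 4)
print(a + b)
###Output
_____no_output_____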
###Markdown
**Tensor Methods** Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!). All of these operations should have similar syntax to their numpy equivalents. (Feel free to skip if you already know this!)
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print("Sum of every element of x: ", x.sum())
print("Sum of the columns of x: ", x.sum(axis=0))
print("Sum of the rows of x: ", x.sum(axis=1))
print("\n")
print("Mean value of all elements of x ", x.mean())
print("Mean values of the columns of x ", x.mean(axis=0))
print("Mean values of the rows of x ", x.mean(axis=1))
###Output
_____no_output_____
###Markdown
**Matrix Operations**The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot multiplication, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.T```. Note the lack of brackets for ```Tensor.T``` - it is an attribute, not a method. Exercise 2: Simple tensor operationsBelow are two expressions involving operations on matrices. $$ \textbf{A} = \begin{bmatrix}2 &4 \\5 & 7 \end{bmatrix} \begin{bmatrix} 1 &1 \\2 & 3\end{bmatrix} + \begin{bmatrix}10 & 10 \\ 12 & 1 \end{bmatrix} $$and$$ b = \begin{bmatrix} 3 \\ 5 \\ 7\end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8\end{bmatrix}$$The code block below, which computes these expressions using PyTorch, is incomplete; fill in the missing lines.
###Code
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
def simple_operations(a1):
################################################
## TODO for students: create the a2 and a3 matrices
## from the first expression
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
a2 = ...
a3 = ...
answer = ...
return answer
## TODO for students: complete the function above and assign
## the result to a tensor named A
#A = simple_operations(a1)
#print(A)
# to_remove solution
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
def simple_operations(a1):
################################################
## TODO for students: create the a2 and a3 matrices
## from the first expression
#raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10],[12, 1]])
answer = a1 @ a2 + a3
return answer
## TODO for students: compute the expression above and assign
## the result to a tensor named A
A = simple_operations(a1)
print(A)
# Computing expression 2:
def dot_product():
###############################################
## TODO for students: create the b1 and b2 matrices
## from the second expression
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
b1 = ...
b2 = ...
product = ...
return product
## TODO for students: compute the expression above and assign
## the result to a tensor named b
#b = dot_product()
#print(b)
# to_remove solution
# Computing expression 2:
def dot_product():
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
product = torch.dot(b1, b2)
return product
## TODO for students: compute the expression above and assign
## the result to a tensor named b
b = dot_product()
print(b)
###Output
_____no_output_____
###Markdown
Section 2.3 Manipulating Tensors in Pytorch
###Code
#@title Video 2.3: Tensor Indexing
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="0d0KSJ3lJbg", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
**Indexing**Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0 and ranges are specified to include the first element but exclude the last. We can access elements according to their relative position to the end of the list by using negative indices.For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as numpy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(" shape of x[0]:", x[0].shape)
print(" shape of x[0][0]:", x[0][0].shape)
print(" shape of x[0][0][0]:", x[0][0][0].shape)
###Output
_____no_output_____
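###Markdown
As in numpy, we can also index several dimensions at once with comma-separated indices and slices (a minimal sketch):
###Code
x = torch.arange(12).reshape(3, 4)
print(x)
print(x[1, 2])      # single element: row 1, column 2
print(x[:, 1])      # every row, column 1
print(x[0:2, -2:])  # top-right 2x2 block
###Output
_____no_output_____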
###Markdown
**Flatten and reshape**There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print("Original z: \n ", z)
# 2D -> 1D
z = z.flatten()
print("Flattened z: \n ", z)
# and back to 2D
z = z.reshape(3, 4)
print("Reshaped (3x4) z: \n", z)
###Output
_____no_output_____
###Markdown
You will also see the ```.view()``` method used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()```. The documentation can be found in the appendix. **Squeezing tensors**When processing batches of data, you will quite often be left with singleton dimensions. e.g. [1,10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there...In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print("x[0]: ", x[0])
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, x[0] gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print("x[0]: ", x[0])
# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print("shape of y: ", y.shape)
# lets insert a singleton dimension
y = y.unsqueeze(1)
print("shape of y: ", y.shape)
###Output
_____no_output_____
###Markdown
**Permutation**Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension i.e. [48x64x3]. To get around this we can use ```.permute()```
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
###Output
_____no_output_____
###Markdown
**Concatenation** In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor’s axis-0 length (6) is the sum of the two input tensors’ axis-0 lengths (3 + 3), while the second output tensor’s axis-1 length (8) is the sum of the two input tensors’ axis-1 lengths (4 + 4).
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
#concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by columns: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
###Markdown
**Conversion to Other Python Objects**Converting to a NumPy array, or vice versa, is easy. A tensor created with ```torch.tensor()``` copies its input, so the result does not share memory with the source. This minor inconvenience is actually quite important: when you perform operations on the CPU or on GPUs, you do not want to halt computation, waiting to see whether the NumPy package of Python might want to be doing something else with the same chunk of memory. (Note that ```.numpy()``` on a CPU tensor *does* share memory with the tensor.)When converting to a numpy array, the information being tracked by the tensor will be lost, i.e. the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the ```.item()``` method or Python’s built-in functions such as ```float()``` and ```int()```.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____
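###Markdown
Two caveats worth knowing (a minimal sketch, not covered above): ```int()``` truncates towards zero, and ```.item()``` only works on one-element tensors.
###Code
a = torch.tensor([3.5])
print(int(a))  # truncates towards zero -> 3
b = torch.tensor([1.0, 2.0])
try:
  b.item()  # raises: item() needs exactly one element
except (RuntimeError, ValueError) as e:
  print("item() failed:", e)
###Output
_____no_output_____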
###Markdown
Exercise 3: Manipulating TensorsUsing a combination of the methods discussed above, complete the functions below. **Function A** This function takes in two 2D tensors $A$ and $B$ and returns the column sum of $A$ multiplied by the sum of all the elements of $B$ i.e. a scalar. e.g: $ A = \begin{bmatrix}1 & 1 \\1 & 1 \end{bmatrix}$ $ B = \begin{bmatrix}1 & 2 & 3\\1 & 2 & 3 \end{bmatrix}$$ Out = 12 * \begin{bmatrix}2 & 2\\\end{bmatrix} = \begin{bmatrix}24 & 24\\\end{bmatrix}$**Function B** This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension. e.g: $ C = \begin{bmatrix}2 & 3 \\-1 & 10 \end{bmatrix}$ $ Out = \begin{bmatrix}0 & 2 \\1 & 3 \\2 & -1 \\3 & 10\end{bmatrix}$**Hint:** pay close attention to singleton dimensions**Function C**This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $E$ reshaped into the dimensions of $D$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors. e.g. $ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$ $ E = \begin{bmatrix}2 & 3 & 0 & 2 \\\end{bmatrix}$ $ Out = \begin{bmatrix}3 & 2 \\-1 & 5 \end{bmatrix}$ $ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$ $ E = \begin{bmatrix}2 & 3 & 0 \\\end{bmatrix}$ $ Out = \begin{bmatrix}1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix}$**Hint:** ```torch.numel()``` is an easy way of finding the number of elements in a tensor
###Code
################################################
## TODO for students: complete these functions
def functionA(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
## TODO for students
raise NotImplementedError("Student exercise: complete function A")
output = torch.zeros(2)
return output
def functionB(C: torch.Tensor) -> torch.Tensor:
raise NotImplementedError("Student exercise: complete function B")
# TODO flatten the tensor C
C = ...
# TODO create the idx tensor to be concatenated to C
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(D: torch.Tensor, E: torch.Tensor) -> torch.Tensor:
raise NotImplementedError("Student exercise: complete function C")
# TODO check we can reshape E into the shape of D
if ... :
# TODO reshape E into the shape of D
E = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
D = ...
E = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
##TODO: Implement the functions above and then uncomment the following lines to test your code
#print(functionA(torch.tensor([[1,1], [1,1]]), torch.tensor([ [1,2,3],[1,2,3] ]) ))
#print(functionB(torch.tensor([ [2,3],[-1,10] ])))
#print(functionC(torch.tensor([[1, -1],[-1,3]]), torch.tensor([[2,3,0,2]])))
#print(functionC(torch.tensor([[1, -1],[-1,3]]), torch.tensor([[2,3,0]])))
# to_remove solution
def functionA(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
## TODO for students
output = A.sum(axis = 0) * B.sum()
return output
def functionB(C: torch.Tensor) -> torch.Tensor:
# TODO flatten the tensor C
C = C.flatten()
# TODO create the idx tensor to be concatenated to C
# here we're going to do flatten and unsqueeze, but reshape can also be used
idx_tensor = torch.arange(0, len(C))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), C.unsqueeze(1)], axis=1)
return output
def functionC(D: torch.Tensor, E: torch.Tensor) -> torch.Tensor:
# TODO check we can reshape E into the shape of D
if torch.numel(D) == torch.numel(E) :
# TODO reshape E into the shape of D
E = E.reshape(D.shape)
# TODO sum the two tensors
output = D + E
else:
# TODO flatten both tensors
# this time we'll use reshape to keep the singleton dimension
D = D.reshape(1,-1)
E = E.reshape(1,-1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([D,E], axis = 1)
return output
print(functionA(torch.tensor([[1,1], [1,1]]), torch.tensor([ [1,2,3],[1,2,3] ]) ))
print(functionB(torch.tensor([ [2,3],[-1,10] ])))
print(functionC(torch.tensor([[1, -1],[-1,3]]), torch.tensor([[2,3,0,2]])))
print(functionC(torch.tensor([[1, -1],[-1,3]]), torch.tensor([[2,3,0]])))
###Output
_____no_output_____
###Markdown
Section 2.4: GPUs
###Code
#@title Video 2.4: GPU vs CPU
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="9Mc9GFUtILY", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
When using Colab notebooks, you will not have access to a GPU by default. In order to start using GPUs we need to request one. We can do this by going to the runtime tab at the top of the page. By following Runtime -> Change runtime type and selecting "GPU" from the Hardware Accelerator dropdown list, we can start playing with sending tensors to GPUs.Once you have done this, your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell.(For more information on the GPU usage policy, see the appendix.) **Now we have a GPU** The cell below should return True.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction, and allows us to launch CUDA kernels using pure Python.In short, we get the power of parallelising our tensor computations on GPUs, whilst only writing (relatively) simple Python!Let's make some CUDA tensors!
###Code
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
device = "cuda" if torch.cuda.is_available() else "cpu"
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=device)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2,2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(device)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between cpu tensors and cuda tensors**Note that the type of the tensor changed after calling ```.to()```. What happens if we try and perform operations on tensors on devices?
###Code
x = torch.tensor([0, 1, 2], device="cuda")
y = torch.tensor([3, 4, 5], device="cpu")
#Uncomment the following line and run this cell
#z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the ```.to()``` method as before, or the ```.cpu()``` and ```.cuda()``` methods.Generally in this course, all deep learning is done on the GPU and any other computation is done on the CPU, so sometimes we have to pass things back and forth; you'll see us call ```.cpu()``` and ```.cuda()``` to do exactly that.
###Code
x = torch.tensor([0, 1, 2], device="cuda")
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device="cuda")
# moving to cpu
x = x.cpu()
print(x + y)
# moving to gpu
y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Exercise 4: Just how much faster are GPUs?Below is a simple function. Complete the second function, such that it performs the same operations as the first function, but entirely on the GPU.
###Code
def simpleFun():
x = torch.rand(10000, 10000)
y = torch.rand_like(x)
z = 2*torch.ones(10000, 10000)
x = x * y
x = x @ z
def simpleFunGPU():
###############################################
## TODO for students: recreate the above function, but
## ensure all computation happens on the GPU
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
x = ...
y = ...
z = ...
x = ...
x = ...
##TODO: Implement the function above and uncomment the following lines to test your code
#timeFun(simpleFun, iterations = 1 )
#timeFun(simpleFunGPU, iterations = 1)
#to_remove solution
def simpleFun():
x = torch.rand(10000, 10000)
y = torch.rand_like(x)
z = 2*torch.ones(10000, 10000)
x = x * y
x = x @ z
def simpleFunGPU():
x = torch.rand(10000, 10000).to("cuda")
y = torch.rand_like(x).to("cuda")
z = 2*torch.ones(10000, 10000).to("cuda")
x = x * y
x = x @ z
timeFun(simpleFun, iterations = 1)
timeFun(simpleFunGPU, iterations = 1)
###Output
_____no_output_____
###Markdown
Section 2.5: Datasets and Dataloaders
###Code
#@title Video 2.5: Getting Data
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="LSkjPM1gFu0", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Tutorial 1: PyTorch**Week 1, Day 1: Basics and PyTorch****By Neuromatch Academy**__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording__Content reviewers:__ Kelson Shilling-Scrivo, Deepak Raya__Content editors:__ Anoop Kulkarni__Production editors:__ Arush Tagade, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesWe have a few specific objectives for this tutorial:* Learn about PyTorch and tensors* Tensor Manipulations* Data Loading* GPUs and Cuda Tensors* Train NaiveNet* Get to know your pod* Start thinking about the course as a whole
###Code
#@markdown Tutorial slides
# you should link the slides for all tutorial videos here (we will store pdfs on osf)
from IPython.display import HTML
HTML('<iframe src="https://docs.google.com/presentation/d/1x_619dh5wCJbPiG3Ix2TFLavWstWYC1JFtAeMGmmUBI/embed?start=false&loop=false&delayms=3000" frameborder="0" width="960" height="569" allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe>')
###Output
_____no_output_____
###Markdown
--- Setup Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy); set global or environment variables, and load in helper functions for things like plotting.Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.If you start building your own projects built on this code base we highly recommend looking at them in more detail.
###Code
#@title Install dependencies
!pip install pandas --quiet
!pip install -U scikit-learn --quiet
# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
#@title Figure Settings
import ipywidgets as widgets
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
#@title Helper Functions
def checkExercise1(A: torch.Tensor, B: torch.Tensor,
C:torch.Tensor, D:torch.Tensor):
errors = []
# TODO better errors
if not torch.equal(A, torch.ones(20, 21)):
errors.append("A is not a 20 by 21 tensor of ones ")
if not np.array_equal(B.numpy(), np.vander([1, 2, 3], 4)):
errors.append("B is not a tensor containing the elements of Z ")
if C.shape != (20, 21):
errors.append("C is not the correct shape ")
if not torch.equal(D, torch.arange(4, 41, step=2)):
errors.append("D does not contain the correct elements")
if errors == []:
print("All correct!")
else:
print(errors)
def timeFun(f, iterations):
iterations = iterations
t_total = 0
for _ in range(iterations):
start = time.time()
f()
end = time.time()
t_total += end - start
print(f"time taken for {iterations} iterations of {f.__name__}: {t_total}")
###Output
_____no_output_____
###Markdown
**Scratch Code Cells**If you want to quickly try out something or take a look at the data you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook.To open a new scratch cell go to *Insert* → *Scratch code cell*. Section 1: Welcome to Neuromatch Deep learning course
###Code
#@title Video 1: Welcome and History
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This will be an intensive 3-week adventure. We will all learn Deep Learning. In a group. Groups need standards. Read our [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing). **Describe what you hope to get out of this course in about 100 words.**
###Code
#@title Video 2: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Meet our lecturers:Week 1: the building blocks* [Konrad Kording](https://kordinglab.com)* [Andrew Saxe](https://www.saxelab.org/)* [Surya Ganguli](https://ganguli-gang.stanford.edu/)* [Ioannis Mitliagkas](http://mitliagkas.github.io/)* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/)Week 2: making things work* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)* [Alexander Ecker](https://eckerlab.org/)* [James Evans](https://sociology.uchicago.edu/directory/james-evans)* [He He](https://hhexiy.github.io/)* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/)Week 3: more magic* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)* [Jane Wang](http://www.janexwang.com/) and [Feryal Behbahani](https://feryal.github.io/)* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lomonaco](https://www.vincenzolomonaco.com/)Now, go to the visualization of ICLR papers. Read a few abstracts. Look at the various clusters. Where do you see yourself in this map? Section 2: The Basics of PyTorch PyTorch is a Python-based scientific computing package targeted at two sets of audiences:- A replacement for NumPy to use the power of GPUs- A deep learning platform that provides significant flexibility and speedAt its core, PyTorch provides a few key features:- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.- An optimized **autograd** engine for automatically computing derivatives.- A clean, modular API for building and deploying **deep learning models**.You can find more information about PyTorch in the appendix. Section 2.1: Creating Tensors
###Code
#@title Video 3: Making Tensors
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors. **Construct tensors directly:**---
###Code
# we can construct a tensor directly from some common python iterables,
# such as lists and tuples; nested iterables can also be handled, as long
# as the dimensions make sense
# tensor from a list
a = torch.tensor([0,1,2])
#tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print("Tensor a:", a)
print("Tensor b:", b)
print("Tensor c:", c)
###Output
_____no_output_____
###Markdown
**Some common tensor constructors:**---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1,5)
print("Tensor x:", x)
print("Tensor y:", y)
print("Tensor z:", z)
###Output
_____no_output_____
###Markdown
Notice that ```.empty()``` does not return zeros, but seemingly random small numbers. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence faster when you only need to allocate a tensor and intend to fill it in later. **Creating random tensors and tensors like other tensors:**---
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print("Tensor a: ", a)
print("Tensor b: ", b)
print("Tensor c: ", c)
print("Tensor d: ", d)
###Output
_____no_output_____
###Markdown
*Reproducibility*:
- PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA):
```python
import torch
torch.manual_seed(0)
```
- For custom operators, you might need to set the python seed as well:
```python
import random
random.seed(0)
```
- Random number generators in other libraries:
```python
import numpy as np
np.random.seed(0)
```
Here, we define for you a function called `set_seed` that does the job for you!
###Code
def set_seed(seed=None, seed_torch=True):
"""
Function that controls randomness. NumPy and random modules must be imported.
Args:
seed : Integer
A non-negative integer that defines the random state. Default is `None`.
seed_torch : Boolean
If `True` sets the random seed for pytorch tensors, so pytorch module
must be imported. Default is `True`.
Returns:
Nothing.
"""
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
###Output
_____no_output_____
###Markdown
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
###Code
def simplefun(seed=True, my_seed=None):
if seed:
set_seed(seed=my_seed)
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
print("Tensor a: ", a)
print("Tensor b: ", b)
simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed`
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**---The ```.arange()``` and ```.linspace()``` behave how you would expect them to if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Creating TensorsBelow you will find some incomplete code. Fill in the missing code to construct the specified tensors.We want the tensors: $A:$ 20 by 21 tensor consisting of ones$B:$ a tensor with elements equal to the elements of numpy array $Z$$C:$ a tensor with the same number of elements as $A$ but with values $\sim U(0,1)$$D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive.
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## from the first expression
raise NotImplementedError("Student exercise: complete the tensor constructions A, B, C, and D")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
#to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
```All correct!``` Section 2.2: Operations in PyTorch**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
#@title Video 4: Tensor Operators
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
torch.add(a, b, out=c)
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overridden.The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations
###Code
x = torch.tensor([1.0, 2, 4, 8])
y = torch.tensor([2, 2, 2, 2])
x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation
###Output
_____no_output_____
###Markdown
**Tensor Methods** Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!). All of these operations should have similar syntax to their numpy equivalents. (Feel free to skip if you already know this!)
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
###Output
_____no_output_____
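###Markdown
Tensors also support matrix products and transposes; a minimal sketch before the exercise below:
###Code
m = torch.tensor([[1., 2.], [3., 4.]])
n = torch.tensor([[0., 1.], [1., 0.]])
print(m @ n)               # matrix multiplication
print(torch.matmul(m, n))  # same thing, as a function call
v = torch.tensor([1., 2.])
w = torch.tensor([3., 4.])
print(torch.dot(v, w))     # dot product of 1D tensors -> tensor(11.)
print(m.T)                 # transpose (attribute)
print(m.t())               # transpose (method)
###Output
_____no_output_____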
###Markdown
**Matrix Operations**The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot multiplication, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.T```. Note the lack of brackets for ```Tensor.T``` - it is an attribute, not a method. Coding Exercise 2.2: Simple tensor operationsBelow are two expressions involving operations on matrices. $$ \textbf{A} = \begin{bmatrix}2 &4 \\5 & 7 \end{bmatrix} \begin{bmatrix} 1 &1 \\2 & 3\end{bmatrix} + \begin{bmatrix}10 & 10 \\ 12 & 1 \end{bmatrix} $$and$$ b = \begin{bmatrix} 3 \\ 5 \\ 7\end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8\end{bmatrix}$$The code block below, which computes these expressions using PyTorch, is incomplete; fill in the missing lines.
###Code
# Computing expression 1:
def simple_operations(a1):
################################################
## TODO for students: create the a2 and a3 matrices
## from the first expression
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
################################################
a2 = ...
a3 = ...
answer = ...
return answer
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
# A = simple_operations(a1)
# print(A)
#to_remove solution
# Computing expression 1:
def simple_operations(a1):
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10],[12, 1]])
answer = a1 @ a2 + a3
return answer
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
## Uncomment below to test your function
A = simple_operations(a1)
print(A)
###Output
_____no_output_____
###Markdown
```tensor([[20, 24], [31, 27]])```
###Code
# Computing expression 2:
def dot_product():
###############################################
## TODO for students: create the b1 and b2 matrices
## from the second expression
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
b1 = ...
b2 = ...
product = ...
return product
## Uncomment below to test your function
# b = dot_product()
# print(b)
#to_remove solution
# Computing expression 2:
def dot_product():
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
product = torch.dot(b1, b2)
return product
## Uncomment below to test your function
b = dot_product()
print(b)
###Output
_____no_output_____
###Markdown
```tensor(82)``` Section 2.3 Manipulating Tensors in Pytorch
###Code
#@title Video 5: Tensor Indexing
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
**Indexing**Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0 and ranges are specified to include the first element but exclude the last. We can access elements according to their relative position to the end of the list by using negative indices.For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as numpy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
###Output
_____no_output_____
###Markdown
**Flatten and reshape**There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")
# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")
# and back to 2D
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
###Output
_____no_output_____
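###Markdown
A related method, ```.view()```, never copies data, so it fails where ```.reshape()``` would silently make a copy; a minimal sketch of the difference:
###Code
x = torch.arange(6).reshape(2, 3)
y = x.t()             # transpose: a non-contiguous view of x
print(y.reshape(-1))  # fine: reshape copies when it needs to
try:
  y.view(-1)          # view never copies, so this fails
except RuntimeError as e:
  print("view failed:", e)
###Output
_____no_output_____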
###Markdown
You will also see the ```.view()``` method used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()```. The documentation can be found in the appendix. **Squeezing tensors**When processing batches of data, you will quite often be left with singleton dimensions. e.g. [1,10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there...In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print(f"x[0]: {x[0]}")
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, x[0] gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")
# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")
# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
###Output
_____no_output_____
###Markdown
**Permutation**Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension i.e. [48x64x3]. To get around this we can use ```.permute()```
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
###Output
_____no_output_____
###Markdown
**Concatenation** In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor’s axis-0 length (6) is the sum of the two input tensors’ axis-0 lengths (3 + 3), while the second output tensor’s axis-1 length (8) is the sum of the two input tensors’ axis-1 lengths (4 + 4).
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
#concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by columns: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
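###Markdown
```torch.cat``` joins tensors along an existing dimension; a related helper, ```torch.stack```, joins them along a new dimension instead (a minimal sketch, reusing ```x``` and ```y``` from the cell above):
###Code
# stacking two (3, 4) tensors creates a new leading dimension -> (2, 3, 4)
stacked = torch.stack((x, y), dim=0)
print(stacked.shape)
###Output
_____no_output_____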
###Markdown
**Conversion to Other Python Objects**Converting to a NumPy array, or vice versa, is easy. A tensor created with ```torch.tensor()``` copies its input, so the result does not share memory with the source. This minor inconvenience is actually quite important: when you perform operations on the CPU or on GPUs, you do not want to halt computation, waiting to see whether the NumPy package of Python might want to be doing something else with the same chunk of memory. (Note that ```.numpy()``` on a CPU tensor *does* share memory with the tensor.)When converting to a numpy array, the information being tracked by the tensor will be lost, i.e. the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
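###Markdown
A minimal sketch of the memory behaviour described above: ```.numpy()``` on a CPU tensor shares memory with the tensor, while ```torch.tensor()``` makes a copy.
###Code
x = torch.zeros(3)
y = x.numpy()        # shares memory with x
z = torch.tensor(y)  # copies the data
x.add_(1)            # in-place update of x
print(y)             # reflects the change: [1. 1. 1.]
print(z)             # unchanged copy: tensor([0., 0., 0.])
###Output
_____no_output_____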
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the ```.item()``` method or Python’s built-in functions such as ```float()``` and ```int()```.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.3: Manipulating TensorsUsing a combination of the methods discussed above, complete the functions below. **Function A** This function takes in two 2D tensors $A$ and $B$ and returns the column sum of $A$ multiplied by the sum of all the elements of $B$ i.e. a scalar. e.g: $ A = \begin{bmatrix}1 & 1 \\1 & 1 \end{bmatrix}$ $ B = \begin{bmatrix}1 & 2 & 3\\1 & 2 & 3 \end{bmatrix}$$ Out = 12 * \begin{bmatrix}2 & 2\\\end{bmatrix} = \begin{bmatrix}24 & 24\\\end{bmatrix}$**Function B** This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension. e.g: $ C = \begin{bmatrix}2 & 3 \\-1 & 10 \end{bmatrix}$ $ Out = \begin{bmatrix}0 & 2 \\1 & 3 \\2 & -1 \\3 & 10\end{bmatrix}$**Hint:** pay close attention to singleton dimensions**Function C**This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $E$ reshaped into the dimensions of $D$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors. e.g. $ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$ $ E = \begin{bmatrix}2 & 3 & 0 & 2 \\\end{bmatrix}$ $ Out = \begin{bmatrix}3 & 2 \\-1 & 5 \end{bmatrix}$ $ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$ $ E = \begin{bmatrix}2 & 3 & 0 \\\end{bmatrix}$ $ Out = \begin{bmatrix}1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix}$**Hint:** ```torch.numel()``` is an easy way of finding the number of elements in a tensor
###Code
def functionA(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
################################################
## TODO for students: complete functionA
raise NotImplementedError("Student exercise: complete function A")
################################################
# TODO multiplication the sum of the tensors
output = ...
return output
def functionB(C: torch.Tensor) -> torch.Tensor:
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function B")
################################################
# TODO flatten the tensor C
C = ...
# TODO create the idx tensor to be concatenated to C
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(D: torch.Tensor, E: torch.Tensor) -> torch.Tensor:
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function C")
################################################
# TODO check we can reshape E into the shape of D
if ...:
# TODO reshape E into the shape of D
E = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
D = ...
E = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
## Implement the functions above and then uncomment the following lines to test your code
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
#to_remove solution
def functionA(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
# TODO multiplication the sum of the tensors
output = A.sum(axis=0) * B.sum()
return output
def functionB(C: torch.Tensor) -> torch.Tensor:
# TODO flatten the tensor C
C = C.flatten()
# TODO create the idx tensor to be concatenated to C
idx_tensor = torch.arange(0, len(C))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), C.unsqueeze(1)], axis=1)
return output
def functionC(D: torch.Tensor, E: torch.Tensor) -> torch.Tensor:
# TODO check we can reshape E into the shape of D
if torch.numel(D) == torch.numel(E):
# TODO reshape E into the shape of D
E = E.reshape(D.shape)
# TODO sum the two tensors
output = D + E
else:
# TODO flatten both tensors
D = D.reshape(1, -1)
E = E.reshape(1, -1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([D, E], axis=1)
return output
## Implement the functions above and then uncomment the following lines to test your code
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
###Output
_____no_output_____
###Markdown
```tensor([24, 24])tensor([[ 0, 2], [ 1, 3], [ 2, -1], [ 3, 10]])tensor([[ 3, 2], [-1, 5]])tensor([[ 1, -1, -1, 3, 2, 3, 0]])``` Section 2.4: GPUs
###Code
#@title Video 6: GPU vs CPU
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
When using Colab notebooks, you will not have access to a GPU by default. In order to start using GPUs we need to request one. We can do this by going to the runtime tab at the top of the page. By following Runtime -> Change runtime type and selecting "GPU" from the Hardware Accelerator dropdown list, we can start playing with sending tensors to GPUs.Once you have done this, your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell.(For more information on the GPU usage policy, see the appendix.) **Now we have a GPU** The cell below should return True.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction, and allows us to launch CUDA kernels using pure Python.In short, we get the power of parallelising our tensor computations on GPUs, whilst only writing (relatively) simple Python!Let's make some CUDA tensors!
###Code
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("WARNING: For this notebook to perform best, "
"if possible, in the menu under `Runtime` -> "
"`Change runtime type.` select `GPU` ")
else:
print("GPU is enabled in this notebook.")
return device
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
device = set_device()
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=device)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2,2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(device)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between cpu tensors and cuda tensors**Note that the type of the tensor changed after calling ```.to()```. What happens if we try and perform operations on tensors on devices?
###Code
x = torch.tensor([0, 1, 2], device="cuda")
y = torch.tensor([3, 4, 5], device="cpu")
# Uncomment the following line and run this cell
# z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the ```.to()``` method as before, or the ```.cpu()``` and ```.cuda()``` methods.Generally in this course, all deep learning is done on the GPU and any other computation is done on the CPU, so sometimes we have to pass things back and forth; you'll see us call ```.cpu()``` and ```.cuda()``` to do exactly that.
###Code
x = torch.tensor([0, 1, 2], device="cuda")
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device="cuda")
# moving to cpu
x = x.cpu()
print(x + y)
# moving to gpu
y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.4: Just how much faster are GPUs?Below is a simple function. Complete the second function, such that it performs the same operations as the first function, but entirely on the GPU.
###Code
def simpleFun():
x = torch.rand(10000, 10000)
y = torch.rand_like(x)
z = 2*torch.ones(10000, 10000)
x = x * y
x = x @ z
simpleFun()
def simpleFunGPU():
###############################################
## TODO for students: recreate the above function, but
## ensure all computation happens on the GPU
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
###############################################
x = ...
y = ...
z = ...
x = ...
  x = ...
## Implement the function above and uncomment the following lines to test your code
# timeFun(simpleFun, iterations=1)
# timeFun(simpleFunGPU, iterations=1)
#to_remove solution
def simpleFunGPU():
x = torch.rand(10000, 10000).to("cuda")
y = torch.rand_like(x).to("cuda")
z = 2*torch.ones(10000, 10000).to("cuda")
x = x * y
x = x @ z
## Implement the function above and uncomment the following lines to test your code
timeFun(simpleFun, iterations=1)
timeFun(simpleFunGPU, iterations=1)
###Output
_____no_output_____
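###Markdown
A side note on timing GPU code (our own sketch, not part of the exercise): CUDA operations are launched asynchronously, so for accurate measurements it helps to synchronize before reading the clock.
###Code
# Hypothetical timing sketch: synchronize so pending GPU work finishes
# before we stop the timer
import time
if torch.cuda.is_available():
  a = torch.rand(1000, 1000, device="cuda")
  torch.cuda.synchronize()   # wait for the setup work to finish
  start = time.time()
  b = a @ a
  torch.cuda.synchronize()   # wait for the matmul to finish
  print(f"GPU matmul took {time.time() - start:.5f} s")
###Output
_____no_output_____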
###Markdown
Section 2.5: Datasets and Dataloaders
###Code
#@title Video 7: Getting Data
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
###Code
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
###Output
_____no_output_____
###Markdown
**Datasets**The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print(f"Number of samples:{len(cifar10_data)}")
print(f"Class names:{cifar10_data.classes}")
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
# Choose a random sample
random.seed(2021)
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]
print('Label:', cifar10_data.classes[label])
print('Image size:', image.shape)
###Output
_____no_output_____
###Markdown
Color images are modeled as 3 dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimension is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W. Coding Exercise 2.5: Display an image from the datasetLet's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - H × W × C. You need to reorder the dimensions of the tensor using the `permute` method of the tensor.
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
#to_remove solution
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1, 2, 0))
#@title Video 8: Train and Test
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
**Training and Test Datasets**When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, today we will not use both datasets separately; this topic will be addressed in the next days.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
#@title Video 9: Data Augmentation - Transformations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
**Dataloader**Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with a batch size of 64 and shuffling enabled
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
*Reproducibility:* DataLoader will reseed workers following PyTorch's *Randomness in multi-process data loading* algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:

```python
def seed_worker(worker_id):
    worker_seed = torch.initial_seed() % 2**32
    numpy.random.seed(worker_seed)
    random.seed(worker_seed)

g = torch.Generator()
g.manual_seed(0)

DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    worker_init_fn=seed_worker,
    generator=g,
)
```

We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then we can query the next batch using the function `next`. We will see that we have a 4D tensor. This is because we have 64 images in the batch (B) and each image has 3 dimensions: channels (C), height (H) and width (W). So, the size of the 4D tensor is B × C × H × W.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
###Output
_____no_output_____
###Markdown
**Transformations**Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform. Check out the [pytorch documentation](https://pytorch.org/vision/stable/transforms.html) for details. A minimal sketch of chaining transforms is shown below, before the exercise.
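###Code
# A minimal sketch (our own illustration, not part of the exercise below):
# Compose chains several transforms into a single callable.
from torchvision.transforms import Compose, Resize, ToTensor
preprocess = Compose([
    Resize(16),   # resize the shorter image side to 16 pixels
    ToTensor(),   # convert a PIL image to a float tensor in [0, 1]
])
# it could then be passed to a dataset, e.g.:
# datasets.CIFAR10(root="data", download=True, transform=preprocess)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale imagesThe goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images.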
###Code
def my_data_load():
###############################################
  ## TODO for students: load the CIFAR10 data using a transform
  ## that converts the images to grayscale tensors
raise NotImplementedError("Student exercise: fill in the missing code to load the data")
###############################################
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(...,
transform=...)
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# my_data_load()
#to_remove solution
def my_data_load():
  # Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(root="data", download=True,
transform=Compose([ToTensor(), Grayscale()]))
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
my_data_load()
###Output
_____no_output_____
###Markdown
Section 3: Neural NetworksNow it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:- Creating a simple neural network model- Training the network- Visualizing the results of the network- Tweaking the network
###Code
#@title Video 10: CSV Files
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Data LoadingFirst we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
#@title Generate sample data
from sklearn.datasets import make_moons
# Create a dataset of 256 points with a little noise
X, y = make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")
# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Initialize the device variable
device = set_device()
# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)
# Upload the tensor to the device
X = X.to(device)
print(f"Size X:{X.shape}")
# Convert the labels to a long integer tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(device)
print(f"Size y:{y.shape}")
#@title Video 11: Generating the Neural Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.2: Create a Simple Neural NetworkFor this example we want to have a simple neural network consisting of 3 layers:- 1 input layer of size 2 (our points have 2 coordinates)- 1 hidden layer of size 16 (you can play with different numbers here)- 1 output layer of size 2 (we want to have the scores for the two classes)During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.**Programming the Network**PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods: `__init__`In the `__init__` method you need to define the structure of your network. Here you will specify which layers the network will consist of, which activation functions will be used etc. `forward`All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it. `predict`This is not an obligatory method of a neural network module, but it is a good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score. `train`This is also not an obligatory method, but it is a good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook.> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`.
###Code
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
nn.ReLU(), # Activation function (ReLU)
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Choose the most likely label predicted by the network
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
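  # (note: defining train() here shadows nn.Module.train(), which normally toggles training mode)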
def train(self, X, y):
pass
###Output
_____no_output_____
###Markdown
**Check that your network works**Create an instance of your model and visualize it
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(device)
# Print the structure of the network
print(model)
###Output
_____no_output_____
###Markdown
Coding Exercise 3.1: Classify some samplesNow let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. The goal here is just to get some experience with the data structures that are passed to the forward and predict methods and their results.
###Code
# X_samples = ...
# print("Sample input:", X_samples)
## Do a forward pass of the network
# output = ...
# print("Network output:", output)
## Predict the label of each point
# y_predicted = ...
# print("Predicted labels:", y_predicted)
#to_remove solution
X_samples = X[0:5]
print("Sample input:", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("Network output:", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("Predicted labels:", y_predicted)
###Output
_____no_output_____
###Markdown
```Sample input: tensor([[ 0.9066, 0.5052], [-0.2024, 1.1226], [ 1.0685, 0.2809], [ 0.6720, 0.5097], [ 0.8548, 0.5122]], device='cuda:0')Network output: tensor([[-0.3032, -0.5563], [-0.1419, -0.3195], [-0.2879, -0.6030], [-0.2665, -0.4831], [-0.2973, -0.5369]], device='cuda:0', grad_fn=)Predicted labels: tensor([0, 0, 0, 0, 0], device='cuda:0')``` Section 3.3: Train Your Neural Network
###Code
#@title Video 12: Train the Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet - we will cover training in much more detail in the next days. For now, the goal is just to see your network in action!You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we will implement it as a function outside of the class in order to have it in a separate cell.
###Code
#@title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
  # Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
# We'll use the whole dataset during the training instead of using batches
    # in order to keep the code simple for now.
y_logits = model.forward(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(model, X, y, device)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Create a new network instance and train it
model = NaiveNet().to(device)
losses = train(model, X, y)
###Output
_____no_output_____
###Markdown
**Plot the loss during training**Plot the loss during the training to see how it reduces and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
#@title Visualize the training process
#@markdown ### Execute this cell!
!pip install imageio --quiet
!pip install pathlib --quiet
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(10):
filename = "frames/0"+str(i)+"000.png"
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
#@title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Exercise 3.2: Tweak your NetworkYou can now play around with the network a little bit to get a feeling for what different parameters are doing. Here are some ideas for what you could try:- Increase or decrease the number of epochs for training- Increase or decrease the size of the hidden layer- Add one additional hidden layerCan you get the network to better fit the data? A sketch of the last idea is shown below.
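###Code
# A sketch (ours) of one possible tweak: an extra hidden layer.
# The layer sizes here are an arbitrary choice.
class DeeperNaiveNet(nn.Module):
  def __init__(self):
    super().__init__()
    self.layers = nn.Sequential(
        nn.Linear(2, 16),
        nn.ReLU(),
        nn.Linear(16, 16),  # the additional hidden layer
        nn.ReLU(),
        nn.Linear(16, 2),
    )
  def forward(self, x):
    return self.layers(x)
  def predict(self, x):
    return torch.argmax(self.forward(x), 1)
# Uncomment to train the deeper variant with the same train function:
# deeper_model = DeeperNaiveNet().to(device)
# losses = train(deeper_model, X, y)
###Output
_____no_output_____
###Markdown
Compare the resulting decision boundary and loss curve with the original `NaiveNet`.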
###Code
#@title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cnu7pyRx_u0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Interactive Demo: Solving XORHere we use an open source and famous visualization widget developed by the TensorFlow team, available [here](https://github.com/tensorflow/playground).* Play with the widget and observe that you cannot solve the continuous XOR dataset.* Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly.For the second part, you should set the weights by clicking on the connections and either type the value or use the up and down keys to change it by one increment. You could also do the same for the biases by clicking on the tiny square to each neuron's bottom left.Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is: \begin{equation} y = f(x_1)+f(x_2)-f(x_1+x_2)\end{equation}Try to set the weights and biases to implement this function after you have played enough :) A quick numeric check of this identity is sketched below.
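###Code
# Our own quick check that y = relu(x1) + relu(x2) - relu(x1 + x2)
# implements XOR of the input signs on the four quadrant corners
relu = torch.relu
for x1 in (-1.0, 1.0):
  for x2 in (-1.0, 1.0):
    y = relu(torch.tensor(x1)) + relu(torch.tensor(x2)) - relu(torch.tensor(x1 + x2))
    print(f"x1={x1:+.0f}, x2={x2:+.0f} -> y={y.item():.0f}")
###Output
_____no_output_____
###Markdown
Now try to reproduce this by hand in the widget: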
###Code
#@markdown ###Play with the parameters to solve XOR
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor®Dataset=reg-plane&learningRate=0.03®ularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
#@markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'No' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
###Output
_____no_output_____
###Markdown
---Section 4: EthicsLet us watch the coded bias movie together and discuss
###Code
#@title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
--- Bonus
###Code
#@title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
#@title Video 17: It's a wrap!
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Tutorial 1: PyTorch**Week 1, Day 1: Basics and PyTorch****By Neuromatch Academy**__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording__Content reviewers:__ Deepak Raya, Siwei Bai, Kelson Shilling-Scrivo__Content editors:__ Anoop Kulkarni, Spiros Chavlis__Production editors:__ Arush Tagade, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesWe have a few specific objectives for this tutorial:* Learn about PyTorch and tensors* Tensor Manipulations* Data Loading* GPUs and Cuda Tensors* Train NaiveNet* Get to know your pod* Start thinking about the course as a whole
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- Setup Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy), set global or environment variables, and load in helper functions for things like plotting. In some tutorials, you will notice that we install some dependencies even if they are preinstalled on Google Colab or Kaggle. This happens because we have added automation to our repository through [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions).Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.If you start building your own projects on top of this code base, we highly recommend looking at these cells in more detail.
###Code
# @title Install dependencies
!pip install pandas --quiet
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
# @title Figure Settings
import ipywidgets as widgets
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# @title Helper Functions
atform = AirtableForm('appn7VdPRseSoMXEG','W1D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/97e94a29-0b3a-4e16-9a8d-f6838a5bd83d')
def checkExercise1(A, B, C, D):
"""
Helper function for checking exercise.
Args:
A: torch.Tensor
B: torch.Tensor
C: torch.Tensor
D: torch.Tensor
Returns:
Nothing.
"""
errors = []
# TODO better errors and error handling
if not torch.equal(A.to(int),torch.ones(20, 21).to(int)):
errors.append(f"Got: {A} \n Expected: {torch.ones(20, 21)} (shape: {torch.ones(20, 21).shape})")
if not np.array_equal( B.numpy(),np.vander([1, 2, 3], 4)):
errors.append("B is not a tensor containing the elements of Z ")
if C.shape != (20, 21):
errors.append("C is not the correct shape ")
if not torch.equal(D, torch.arange(4, 41, step=2)):
errors.append("D does not contain the correct elements")
if errors == []:
print("All correct!")
else:
[print(e) for e in errors]
def timeFun(f, dim, iterations, device='cpu'):
t_total = 0
for _ in range(iterations):
start = time.time()
f(dim, device)
end = time.time()
t_total += end - start
print(f"time taken for {iterations} iterations of {f.__name__}({dim}): {t_total:.5f}")
###Output
_____no_output_____
###Markdown
**Important note: Google Colab users***Scratch Code Cells*If you want to quickly try out something or take a look at the data you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook.To open a new scratch cell go to *Insert* → *Scratch code cell*. Section 1: Welcome to Neuromatch Deep learning course*Time estimate: ~25mins*
###Code
# @title Video 1: Welcome and History
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing
atform.add_event('Video 1: Welcome and History')
display(out)
###Output
_____no_output_____
###Markdown
This will be an intensive 3-week adventure. We will all learn Deep Learning. In a group. Groups need standards. Read our [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing).
###Code
# @title Video 2: Why DL is cool
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 2: Why DL is cool')
display(out)
###Output
_____no_output_____
###Markdown
**Describe what you hope to get out of this course in about 100 words.** --- Section 2: The Basics of PyTorch*Time estimate: ~2 hours 05 mins* PyTorch is a Python-based scientific computing package targeted at two sets of audiences:- A replacement for NumPy to use the power of GPUs- A deep learning platform that provides significant flexibility and speedAt its core, PyTorch provides a few key features:- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.- An optimized **autograd** engine for automatically computing derivatives.- A clean, modular API for building and deploying **deep learning models**.You can find more information about PyTorch in the appendix. A tiny teaser of the autograd engine is sketched below.
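###Code
# A tiny teaser (our own sketch) of the autograd engine mentioned above;
# autograd is covered in depth tomorrow.
w = torch.tensor(2.0, requires_grad=True)
loss = (3.0 * w - 1.0) ** 2
loss.backward()   # automatically compute d(loss)/dw
print(w.grad)     # analytic answer: 2 * (3w - 1) * 3 = 30
###Output
_____no_output_____
###Markdown
Section 2.1: Creating Tensors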
###Code
# @title Video 3: Making Tensors
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 3: Making Tensors')
display(out)
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so. **Construct tensors directly:**---
###Code
# we can construct a tensor directly from some common python iterables,
# such as list and tuple nested iterables can also be handled as long as the
# dimensions make sense
# tensor from a list
a = torch.tensor([0, 1, 2])
#tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
###Output
_____no_output_____
###Markdown
**Some common tensor constructors:**---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1, 5)
print(f"Tensor x: {x}")
print(f"Tensor y: {y}")
print(f"Tensor z: {z}")
###Output
_____no_output_____
###Markdown
Notice that ```.empty()``` does not return zeros, but seemingly random small numbers. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence a bit faster if you are looking to just create a tensor. **Creating random tensors and tensors like other tensors:**---
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
print(f"Tensor d: {d}")
###Output
_____no_output_____
###Markdown
*Reproducibility*:
- PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA):

```python
import torch
torch.manual_seed(0)
```

- For custom operators, you might need to set the Python seed as well:

```python
import random
random.seed(0)
```

- Random number generators in other libraries:

```python
import numpy as np
np.random.seed(0)
```

Here, we define for you a function called `set_seed` that does the job for you!
###Code
def set_seed(seed=None, seed_torch=True):
"""
Function that controls randomness. NumPy and random modules must be imported.
Args:
seed : Integer
A non-negative integer that defines the random state. Default is `None`.
seed_torch : Boolean
If `True` sets the random seed for pytorch tensors, so pytorch module
must be imported. Default is `True`.
Returns:
Nothing.
"""
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
###Output
_____no_output_____
###Markdown
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
###Code
def simplefun(seed=True, my_seed=None):
if seed:
set_seed(seed=my_seed)
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
print("Tensor a: ", a)
print("Tensor b: ", b)
simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed`
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**---The ```.arange()``` and ```.linspace()``` methods behave how you would expect them to if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Creating TensorsBelow you will find some incomplete code. Fill in the missing code to construct the specified tensors.We want the tensors: $A:$ 20 by 21 tensor consisting of ones$B:$ a tensor with elements equal to the elements of numpy array $Z$$C:$ a tensor with the same number of elements as $A$ but with values $\sim U(0,1)$$D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive.
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## from the first expression
raise NotImplementedError("Student exercise: say what they should have done")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
```All correct!``` Section 2.2: Operations in PyTorch
###Code
# @title Video 4: Tensor Operators
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 4: Tensor Operators')
display(out)
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
# this only works if c and d already exist
torch.add(a, b, out=c)
#Pointwise Multiplication of a and b
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overridden. The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations.
###Code
x = torch.tensor([1, 2, 4, 8])
y = torch.tensor([1, 2, 3, 4])
x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation
###Output
_____no_output_____
###Markdown
**Tensor Methods** Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!) All of these operations should have similar syntax to their numpy equivalents.(Feel free to skip if you already know this!)
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
###Output
_____no_output_____
###Markdown
**Matrix Operations**The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot multiplication, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.T```. Note the lack of brackets for ```Tensor.T``` - it is an attribute, not a method. A short demo cell follows, before the exercise.
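###Code
# A minimal demo (ours) of the matrix operators described above
M = torch.tensor([[1., 2.], [3., 4.]])
v = torch.tensor([1., 2.])
print(M @ M)               # matrix multiplication with the @ operator
print(torch.matmul(M, M))  # the same thing, written explicitly
print(torch.dot(v, v))     # dot product of two 1D tensors
print(M.T)                 # transpose via the .T attribute
###Output
_____no_output_____
###Markdown
Coding Exercise 2.2 : Simple tensor operationsBelow are two expressions involving operations on matrices. $$ \textbf{A} = \begin{bmatrix}2 &4 \\5 & 7 \end{bmatrix} \begin{bmatrix} 1 &1 \\2 & 3\end{bmatrix} + \begin{bmatrix}10 & 10 \\ 12 & 1 \end{bmatrix} $$and$$ b = \begin{bmatrix} 3 \\ 5 \\ 7\end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8\end{bmatrix}$$The code block below that computes these expressions using PyTorch is incomplete - fill in the missing lines.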
###Code
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
################################################
  ## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
################################################
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = ...
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# A = simple_operations(a1, a2, a3)
# print(A)
# to_remove solution
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = a1 @ a2 + a3
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
A = simple_operations(a1, a2, a3)
print(A)
###Output
_____no_output_____
###Markdown
```tensor([[20, 24], [31, 27]])```
###Code
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
###############################################
  ## TODO for students: complete the second computation using the argument tensors
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
# Use torch.dot() to compute the dot product of two tensors
product = ...
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# b = dot_product(b1, b2)
# print(b)
# to_remove solution
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
# Use torch.dot() to compute the dot product of two tensors
product = torch.dot(b1, b2)
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
b = dot_product(b1, b2)
print(b)
###Output
_____no_output_____
###Markdown
```tensor(82)``` Section 2.3 Manipulating Tensors in Pytorch
###Code
# @title Video 5: Tensor Indexing
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 5: Tensor Indexing')
display(out)
###Output
_____no_output_____
###Markdown
**Indexing**Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0 and ranges are specified to include the first element but exclude the last. We can access elements according to their relative position to the end of the list by using negative indices. Indexing is also referred to as slicing.For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as numpy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
###Output
_____no_output_____
###Markdown
**Flatten and reshape**There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")
# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")
# and back to 2D
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
###Output
_____no_output_____
###Markdown
You will also see the ```.view()``` method used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()```. The documentation can be found in the appendix; a quick sketch of the difference follows.
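###Code
# A minimal sketch (ours) of the .view() vs .reshape() difference:
# .reshape() copies the data when the memory layout requires it,
# while .view() never copies and raises an error instead.
x = torch.arange(6).reshape(2, 3)
y = x.t()             # transposing makes y non-contiguous in memory
print(y.reshape(6))   # works: reshape copies if it has to
# y.view(6)           # would raise a RuntimeError because y is non-contiguous
###Output
_____no_output_____
###Markdown
**Squeezing tensors**When processing batches of data, you will quite often be left with singleton dimensions. e.g. [1,10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there...In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.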
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print(f"x[0]: {x[0]}")
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, x[0] gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")
# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")
# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
###Output
_____no_output_____
###Markdown
**Permutation**Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension i.e. [48x64x3]. To get around this we can use ```.permute()```
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
###Output
_____no_output_____
###Markdown
You may also see ```.transpose()``` used. This works in a similar way as permute, but can only swap two dimensions at once. **Concatenation** In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor’s axis-0 length ( 6 ) is the sum of the two input tensors’ axis-0 lengths ( 3+3 ); while the second output tensor’s axis-1 length ( 8 ) is the sum of the two input tensors’ axis-1 lengths ( 4+4 ).
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
#concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by columns: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
###Markdown
**Conversion to Other Python Objects**Converting a tensor to a NumPy array, or vice versa, is easy, but note the memory semantics: ```Tensor.numpy()``` returns an array that shares memory with the (CPU) tensor, whereas ```torch.tensor()``` always makes an independent copy. The copy is actually quite important: when you perform operations on the CPU or on GPUs, you do not want to halt computation, waiting to see whether the NumPy package of Python might want to be doing something else with the same chunk of memory. A short demonstration of the sharing behaviour follows after the next cell.When converting to a numpy array, the information being tracked by the tensor will be lost i.e. the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()  # shares memory with `x` (CPU tensors only)
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)  # copies the data into a new tensor
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
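To see the memory sharing in action, here is a small sketch: an in-place change to the tensor shows up in the array returned by ```.numpy()```, while the ```torch.tensor()``` copy is unaffected.
```python
a = torch.ones(3)
b = a.numpy()        # b shares memory with a (CPU tensors only)
a.add_(1)            # in-place addition on the tensor
print(b)             # [2. 2. 2.] -- the NumPy array sees the change
c = torch.tensor(b)  # torch.tensor() copies the data
b[0] = 0
print(c)             # tensor([2., 2., 2.]) -- the copy is unaffected
```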
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the ```.item()``` method or Python's built-in functions such as ```float()``` and ```int()```.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.3: Manipulating TensorsUsing a combination of the methods discussed above, complete the functions below. **Function A** This function takes in two 2D tensors $A$ and $B$ and returns the column sum of $A$ multiplied by the sum of all the elements of $B$, i.e., a scalar, e.g.,:$ A = \begin{bmatrix}1 & 1 \\1 & 1 \end{bmatrix} \,$and$ B = \begin{bmatrix}1 & 2 & 3\\1 & 2 & 3 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix} 2 & 2 \\\end{bmatrix} \cdot 12 = \begin{bmatrix}24 & 24\\\end{bmatrix}$**Function B** This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.,:$ C = \begin{bmatrix}2 & 3 \\-1 & 10 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix}0 & 2 \\1 & 3 \\2 & -1 \\3 & 10\end{bmatrix}$**Hint:** pay close attention to singleton dimensions**Function C**This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $D$-shaped $E$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors, e.g.,:$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix} \,$and $ E = \begin{bmatrix}2 & 3 & 0 & 2 \\\end{bmatrix} \, $so$ \, Out = \begin{bmatrix}3 & 2 \\-1 & 5 \end{bmatrix}$$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$and$ \, E = \begin{bmatrix}2 & 3 & 0 \\\end{bmatrix} \,$so$ \, Out = \begin{bmatrix}1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix}$**Hint:** `torch.numel()` is an easy way of finding the number of elements in a tensor
###Code
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
  `my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
################################################
## TODO for students: complete functionA
raise NotImplementedError("Student exercise: complete function A")
################################################
  # TODO multiply the column sum of `my_tensor1` by the total sum of `my_tensor2`
output = ...
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function B")
################################################
# TODO flatten the tensor `my_tensor`
my_tensor = ...
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
  elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
  ## TODO for students: complete functionC
raise NotImplementedError("Student exercise: complete function C")
################################################
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if ...:
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
my_tensor1 = ...
my_tensor2 = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
# to_remove solution
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
  `my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
  # TODO multiply the column sum of `my_tensor1` by the total sum of `my_tensor2`
output = my_tensor1.sum(axis=0) * my_tensor2.sum()
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO flatten the tensor `my_tensor`
my_tensor = my_tensor.flatten()
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = torch.arange(0, len(my_tensor))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), my_tensor.unsqueeze(1)], axis=1)
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
  elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
  Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if torch.numel(my_tensor1) == torch.numel(my_tensor2):
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = my_tensor2.reshape(my_tensor1.shape)
# TODO sum the two tensors
output = my_tensor1 + my_tensor2
else:
# TODO flatten both tensors
my_tensor1 = my_tensor1.reshape(1, -1)
my_tensor2 = my_tensor2.reshape(1, -1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([my_tensor1, my_tensor2], axis=1).squeeze()
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
###Output
_____no_output_____
###Markdown
```
tensor([24, 24])
tensor([[ 0,  2],
        [ 1,  3],
        [ 2, -1],
        [ 3, 10]])
tensor([[ 3,  2],
        [-1,  5]])
tensor([ 1, -1, -1,  3,  2,  3,  0])
```
Section 2.4: GPUs
###Code
# @title Video 6: GPU vs CPU
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 6: GPU vs CPU')
display(out)
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
Colab notebooks do not have access to a GPU by default. In order to start using GPUs we need to request one: go to the Runtime tab at the top of the page, follow Runtime -> Change runtime type, and select "GPU" from the Hardware accelerator dropdown list. Once you have done this, your runtime will restart and you will need to rerun the first setup cell to re-import PyTorch. Then proceed to the next cell. (For more information on the GPU usage policy, see the appendix.) **Now we have a GPU** The cell below should return True.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction and allows us to launch CUDA kernels using pure Python. In short, we get the power of parallelizing our tensor computations on GPUs, whilst only writing (relatively) simple Python!Here, we define the function `set_device`, which returns the device used in the notebook, i.e., `cpu` or `cuda`. Unless otherwise specified, we use this function on top of every tutorial, and we store the device variable such as```pythonDEVICE = set_device()```Let's define the function using the PyTorch package `torch.cuda`, which is lazily initialized, so we can always import it, and use `is_available()` to determine if our system supports CUDA.
###Code
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
  if device != "cuda":
    print("GPU is not enabled in this notebook. \n"
          "If you want to enable it, in the menu under `Runtime` -> \n"
          "`Change runtime type`, select `GPU` from the `Hardware accelerator` dropdown menu")
  else:
    print("GPU is enabled in this notebook. \n"
          "If you want to disable it, in the menu under `Runtime` -> \n"
          "`Change runtime type`, select `None` from the `Hardware accelerator` dropdown menu")
return device
###Output
_____no_output_____
###Markdown
Let's make some CUDA tensors!
###Code
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between cpu tensors and cuda tensors**Note that the type of the tensor changed after calling ```.to()```. What happens if we try to perform operations on tensors that live on different devices?
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
# Uncomment the following line and run this cell
# z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the `.to()` method as before, or the `.cpu()` and `.cuda()` methods. Note that using `.cuda()` will throw an error if CUDA is not enabled on your machine.Generally in this course, all deep learning is done on the GPU while other computation is done on the CPU, so sometimes we have to pass things back and forth; you'll see calls like the ones below.
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)
# moving to cpu
x = x.to("cpu") # alternatively, you can use x = x.cpu()
print(x + y)
# moving to gpu
y = y.to(DEVICE) # alternatively, you can use y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.4: Just how much faster are GPUs?Below is a skeleton of a simple function. Complete it so that the same operations can run entirely on the GPU when a CUDA device is passed in. We will use the helper function `timeFun(f, dim, iterations, device)` to compare run times on the two devices.
###Code
dim = 10000
iterations = 1
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda:0"
Returns:
Nothing.
"""
###############################################
## TODO for students: recreate the above function, but
## ensure all computation happens on the GPU
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
###############################################
  x = ...  # TODO: (dim, dim) tensor of uniform random numbers on `device`
  y = ...  # TODO: another (dim, dim) tensor of uniform random numbers on `device`
  z = ...  # TODO: (dim, dim) tensor filled with the value 2 on `device`
  x = ...  # TODO: elementwise multiplication of `x` and `y`
  y = ...  # TODO: matrix multiplication of `x` and `z`
del x
del y
del z
## TODO: Implement the function above and uncomment the following lines to test your code
# timeFun(f=simpleFun, dim=dim, iterations=iterations)
# timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
# to_remove solution
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
  x = torch.rand(dim, dim).to(device)  # uniform random matrix, moved to `device`
  y = torch.rand_like(x).to(device)  # same shape (and device) as `x`
  z = 2*torch.ones(dim, dim).to(device)  # matrix of twos; torch.ones(dim, dim, device=device) would avoid the extra copy
  x = x * y  # elementwise multiplication
  x = x @ z  # matrix multiplication
del x
del y
del z
## TODO: Implement the function above and uncomment the following lines to test your code
timeFun(f=simpleFun, dim=dim, iterations=iterations)
timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
###Output
_____no_output_____
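One caveat when you interpret such timings (a minimal sketch, assuming a CUDA device is available): PyTorch queues GPU work asynchronously, so a plain `time.time()` difference can under-report GPU time unless you synchronize first.
```python
import time
import torch

a = torch.rand(1000, 1000, device="cuda")
b = torch.rand(1000, 1000, device="cuda")
torch.cuda.synchronize()        # make sure setup work has finished
start = time.time()
c = a @ b                       # queued on the GPU; the call returns immediately
torch.cuda.synchronize()        # wait for the multiplication to actually finish
print(f"elapsed: {time.time() - start:.5f} s")
```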
###Markdown
Sample output (depends on your hardware)```
time taken for 1 iterations of simpleFun(10000, cpu): 28.50481
time taken for 1 iterations of simpleFun(10000, cuda): 0.91102
``` **Discuss!**Try reducing the dimensions of the tensors and increasing the iterations. You can get to a point where the CPU-only function is faster than the GPU function. Why might this be? (Hint: think about the fixed overheads of launching GPU kernels and of moving data between CPU and GPU memory; for small tensors they can outweigh the parallel speedup.) Section 2.5: Datasets and Dataloaders
###Code
# @title Video 7: Getting Data
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 7: Getting Data')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
###Code
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
###Output
_____no_output_____
###Markdown
**Datasets**The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print(f"Number of samples: {len(cifar10_data)}")
print(f"Class names: {cifar10_data.classes}")
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
# Choose a random sample
random.seed(2021)
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]  # randint is inclusive on both ends
print(f"Label: {cifar10_data.classes[label]}")
print(f"Image size: {image.shape}")
###Output
_____no_output_____
###Markdown
Color images are modeled as 3-dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimension is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W. Coding Exercise 2.5: Display an image from the datasetLet's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects the image in a different format, $H \times W \times C$.You need to reorder the dimensions of the tensor using the `permute` method. `Tensor.permute(*dims)` rearranges the dimensions of the original tensor according to the desired ordering and returns a tensor with the same elements in the new layout; the total size stays the same.**Code hint:**
```python
# create a tensor of size 2 x 4
input_var = torch.randn(2, 4)
# print its size and the tensor
print(input_var.size())
print(input_var)
# dimensions permuted
input_var = input_var.permute(1, 0)
# print its size and the permuted tensor
print(input_var.size())
print(input_var)
```
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
# plt.show()
# to_remove solution
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1, 2, 0))
plt.show()
#@title Video 8: Train and Test
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 8: Train and Test')
display(out)
###Output
_____no_output_____
###Markdown
**Training and Test Datasets**When loading a dataset, you can specify whether you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, today we will not use the two datasets separately, but this topic will be addressed in the coming days.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
# @title Video 9: Data Augmentation - Transformations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 9: Data Augmentation - Transformations')
display(out)
###Output
_____no_output_____
###Markdown
**Dataloader**Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
*Reproducibility:* DataLoader will reseed workers following the Randomness in multi-process data loading algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:
```python
def seed_worker(worker_id):
    worker_seed = torch.initial_seed() % 2**32
    numpy.random.seed(worker_seed)
    random.seed(worker_seed)

g_seed = torch.Generator()
g_seed.manual_seed(my_seed)

DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    worker_init_fn=seed_worker,
    generator=g_seed
    )
```
**Note:** For the `seed_worker` to have an effect, `num_workers` should be 2 or more. We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then we can query the next batch using the function `next`.We can now see that we have a 4D tensor. This is because we have 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
plt.show()
###Output
_____no_output_____
###Markdown
**Transformations**Another useful feature when loading a dataset is applying transformations on the data: color conversions, normalization, cropping, rotation, etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform. Check out the [pytorch documentation](https://pytorch.org/vision/stable/transforms.html) for details. A small illustrative transform pipeline is sketched below. Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale imagesThe goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility.
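For illustration only (the particular transforms below are arbitrary example choices, not part of the exercise), a composed pipeline might look like this; the transforms are applied in list order:
```python
from torchvision.transforms import Compose, ToTensor, Normalize, RandomHorizontalFlip

example_transform = Compose([
    RandomHorizontalFlip(p=0.5),  # random augmentation, applied to the PIL image
    ToTensor(),                   # PIL image -> float tensor with values in [0, 1]
    Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),  # shift and scale each channel
])
```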
###Code
def my_data_load():
###############################################
  ## TODO for students: load the CIFAR10 dataset with a transform
  ## that converts the images to grayscale tensors
raise NotImplementedError("Student exercise: fill in the missing code to load the data")
###############################################
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(...,
transform=...)
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]  # randint is inclusive on both ends
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# data = my_data_load()
# to_remove solution
def my_data_load():
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(root="data", download=True,
transform=Compose([ToTensor(), Grayscale()]))
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]  # randint is inclusive on both ends
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
data = my_data_load()
###Output
_____no_output_____
###Markdown
--- Section 3: Neural Networks*Time estimate: ~1 hour 30 mins (excluding movie)* Now it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:- Creating a simple neural network model- Training the network- Visualizing the results of the network- Tweaking the network
###Code
# @title Video 10: CSV Files
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 10: CSV Files')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Data LoadingFirst we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
# @title Generate sample data
# @markdown We use the `scikit-learn` module
from sklearn.datasets import make_moons
# Create a dataset of 256 points with a little noise
X, y = make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")
# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
plt.show()
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Initialize the device variable
DEVICE = set_device()
# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)
# Upload the tensor to the device
X = X.to(DEVICE)
print(f"Size X:{X.shape}")
# Convert the labels to a long integer tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(DEVICE)
print(f"Size y:{y.shape}")
###Output
_____no_output_____
###Markdown
Section 3.2: Create a Simple Neural Network
###Code
# @title Video 11: Generating the Neural Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 11: Generating the Neural Network')
display(out)
###Output
_____no_output_____
###Markdown
For this example we want to have a simple neural network consisting of 3 layers:- 1 input layer of size 2 (our points have 2 coordinates)- 1 hidden layer of size 16 (you can play with different numbers here)- 1 output layer of size 2 (we want to have the scores for the two classes)During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.**Programming the Network**PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:`__init__`In the `__init__` method you need to define the structure of your network. Here you will specify which layers the network will consist of, which activation functions will be used, etc.`forward`All neural network modules need to implement the `forward` method. It specifies the computations the network needs to perform when data is passed through it.`predict`This is not an obligatory method of a neural network module, but it is good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score.`train`This is also not an obligatory method, but it is good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook.> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`.
###Code
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
        nn.ReLU(),  # Activation function (ReLU) is a widely used non-linearity that is cheap to compute:
                    # it returns 0 for any negative input, and returns any positive value x unchanged.
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Choose the most likely label predicted by the network
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
def train(self, X, y):
pass
###Output
_____no_output_____
###Markdown
**Check that your network works**Create an instance of your model and visualize it
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(DEVICE)
# Print the structure of the network
print(model)
###Output
_____no_output_____
###Markdown
Coding Exercise 3.2: Classify some samplesNow let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. The goal here is just to get some experience with the data structures that are passed to the forward and predict methods and their results.
###Code
## Get the samples
# X_samples = ...
# print("Sample input:\n", X_samples)
## Do a forward pass of the network
# output = ...
# print("\nNetwork output:\n", output)
## Predict the label of each point
# y_predicted = ...
# print("\nPredicted labels:\n", y_predicted)
# to_remove solution
## Get the samples
X_samples = X[0:5]
print("Sample input:\n", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("\nNetwork output:\n", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("\nPredicted labels:\n", y_predicted)
###Output
_____no_output_____
###Markdown
```
Sample input:
 tensor([[ 0.9066,  0.5052],
        [-0.2024,  1.1226],
        [ 1.0685,  0.2809],
        [ 0.6720,  0.5097],
        [ 0.8548,  0.5122]], device='cuda:0')

Network output:
 tensor([[ 0.1543, -0.8018],
        [ 2.2077, -2.9859],
        [-0.5745, -0.0195],
        [ 0.1924, -0.8367],
        [ 0.1818, -0.8301]], device='cuda:0', grad_fn=<AddmmBackward>)

Predicted labels:
 tensor([0, 0, 1, 0, 0], device='cuda:0')
```
Section 3.3: Train Your Neural Network
###Code
# @title Video 12: Train the Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 12: Train the Network')
display(out)
###Output
_____no_output_____
###Markdown
Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet; we will cover training in much more detail in the next days. For now, the goal is just to see your network in action!You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we will implement it as a function outside of the class in order to have it in a separate cell.
###Code
# @title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
  # Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
    # We'll use the whole dataset during the training instead of using batches
    # in order to keep the code simple for now.
y_logits = model.forward(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(model, X, y, DEVICE)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Create a new network instance a train it
model = NaiveNet().to(DEVICE)
losses = train(model, X, y)
###Output
_____no_output_____
###Markdown
**Plot the loss during training**Plot the loss during the training to see how it reduces and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
# @title Visualize the training process
# @markdown ### Execute this cell!
!pip install imageio --quiet
!pip install pathlib --quiet
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(10):
filename = "frames/0"+str(i)+"000.png"
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
# @title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 13: Play with it')
display(out)
###Output
_____no_output_____
###Markdown
Exercise 3.3: Tweak your NetworkYou can now play around with the network a little bit to get a feeling for what the different parameters are doing. Here are some ideas for what you could try:- Increase or decrease the number of epochs for training- Increase or decrease the size of the hidden layer- Add one additional hidden layer (a sketch follows below)Can you get the network to better fit the data?
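As a sketch of the third idea (the layer sizes here are arbitrary choices, and you would keep the `predict` and `train` methods from `NaiveNet`):
```python
class DeeperNaiveNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(2, 16),
            nn.ReLU(),
            nn.Linear(16, 16),  # the additional hidden layer
            nn.ReLU(),
            nn.Linear(16, 2),
        )

    def forward(self, x):
        return self.layers(x)
```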
###Code
# @title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 14: XOR Widget')
display(out)
###Output
_____no_output_____
###Markdown
Exclusive OR (XOR) is a logical operation that gives a true (`1`) output when the number of true inputs is odd. That is, a true output results if one, and only one, of the inputs to the gate is true. If both inputs are false (`0`) or both are true (`1`), a false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false.In the case of two inputs ($X$ and $Y$) the following truth table applies:\begin{array}{ccc}X & Y & \text{XOR} \\\hline0 & 0 & 0 \\0 & 1 & 1 \\1 & 0 & 1 \\1 & 1 & 0 \\\end{array}Here, with `0`, we denote `False`, and with `1` we denote `True` in boolean terms. Interactive Demo 3.3: Solving XORHere we use an open source and famous visualization widget developed by the Tensorflow team, available [here](https://github.com/tensorflow/playground).* Play with the widget and observe that you can not solve the continuous XOR dataset.* Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly.For the second part, you should set the weights by clicking on the connections and either typing the value or using the up and down keys to change it by one increment. You can do the same for the biases by clicking on the tiny square to each neuron's bottom left.Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is: \begin{equation} y = f(x_1)+f(x_2)-f(x_1+x_2)\end{equation}Try to set the weights and biases to implement this function after you have played enough :)
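As a quick sanity check of that identity (a minimal sketch; we use ±1 inputs to mimic the playground's positive/negative quadrants):
```python
import torch

def xor_relu(x1, x2):
    f = torch.relu
    return f(x1) + f(x2) - f(x1 + x2)

for x1 in (-1.0, 1.0):
    for x2 in (-1.0, 1.0):
        out = xor_relu(torch.tensor(x1), torch.tensor(x2))
        print(f"x1={x1:+.0f}, x2={x2:+.0f} -> {out.item():.0f}")
# same-sign inputs give 0, opposite-sign inputs give 1, matching XOR
```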
###Code
# @markdown ###Play with the parameters to solve XOR
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor®Dataset=reg-plane&learningRate=0.03®ularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
# @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
###Output
_____no_output_____
###Markdown
--- Section 4: Ethics And Course Info
###Code
# @title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 17: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Meet our lecturers:Week 1: the building blocks* [Konrad Kording](https://kordinglab.com)* [Andrew Saxe](https://www.saxelab.org/)* [Surya Ganguli](https://ganguli-gang.stanford.edu/)* [Ioannis Mitliagkas](http://mitliagkas.github.io/)* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/)Week 2: making things work* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)* [Alexander Ecker](https://eckerlab.org/)* [James Evans](https://sociology.uchicago.edu/directory/james-evans)* [He He](https://hhexiy.github.io/)* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/)Week 3: more magic* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)* [Jane Wang](http://www.janexwang.com/) and [Feryal Behbahani](https://feryal.github.io/)* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lamonaco](https://www.vincenzolomonaco.com/)Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map? --- Submit to Airtable
###Code
# @title Video 18: Submission info
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This is Darryl, the Deep Learning Dapper Lion, and he's here to teach you about content submission to Airtable. At the end of each tutorial there will be an Airtable Submission Cell. Run the cell to generate the Airtable submission button and click on it to submit your information to Airtable.If it is the last tutorial of the day, your button will take you to the end-of-day survey; otherwise it is a plain submission button. It is critical that you push the submit button for every tutorial you run. Even if you don't finish the tutorial, still submit!Submitting is the only way we can verify that you attempted each tutorial, which is critical for the award of your completion certificate at the end of the course.Finally, we try to keep the Airtable code as hidden as possible, but if you ever see any calls to `atform` such as `atform.add_event()` in the coding exercises, just know that they are for saving Airtable information only. They will not affect the code that is being run around them in any way, so please do not modify, comment out, or worry about any of those lines of code.Now, let's try submitting today's course to Airtable by running the next cell and clicking the button when it appears.
###Code
# @title Airtable Submission Link
from IPython import display
display.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link to survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
--- Bonus - 60 years of Machine Learning Research in one Plotby [Hendrik Strobelt](http://hendrik.strobelt.com) (MIT-IBM Watson AI Lab) with support from Benjamin Hoover.In this notebook we visualize a subset* of 3,300 articles retrieved from the AllenAI [S2ORC dataset](https://github.com/allenai/s2orc). We represent each paper by a position that is the output of a dimensionality reduction method applied to a vector representation of each paper. The vector representation is the output of a neural network.*The selection is strongly biased by the keywords and methodology we used to filter. Please see the Methods section below to learn about what we did.
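As a rough sketch of how such positions could be produced (the `umap-learn` package, the placeholder data, and the parameter values below are illustrative assumptions, not the authors' exact pipeline):
```python
import numpy as np
import umap  # pip install umap-learn

# assume `embeddings` is an (n_papers, d) array of SPECTER paper vectors;
# random placeholder data stands in for it here
embeddings = np.random.rand(1000, 768).astype(np.float32)
reducer = umap.UMAP(n_neighbors=100, min_dist=0.1, metric="cosine")
positions = reducer.fit_transform(embeddings)  # (n_papers, 2) x/y coordinates
```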
###Code
# @title Import `altair` and load the data
!pip install altair vega_datasets --quiet
import requests
import altair as alt  # altair is used for defining data visualizations
# Source data files
# Position data file maps ID to x,y positions
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc.pos_umap_cosine_100_d0.1.json
POS_FILE = 'https://osf.io/qyrfn/download'
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc_clean.csv
# Metadata file maps ID to title, abstract, author,....
META_FILE = 'https://osf.io/vfdu6/download'
# data loading and wrangling
def load_data():
positions = pd.read_json(POS_FILE)
positions[['x', 'y']] = positions['pos'].to_list()
meta = pd.read_csv(META_FILE)
return positions.merge(meta, left_on='id', right_on='paper_id')
# load data
data = load_data()
# @title Define Visualization using Altair
YEAR_PERIOD = "quinquennial" # @param
selection = alt.selection_multi(fields=[YEAR_PERIOD], bind='legend')
data[YEAR_PERIOD] = (data["year"] / 5.0).apply(np.floor) * 5
chart = alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count"]], width=800,
height=800).mark_circle(radius=2, opacity=0.2).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False, clamp=True, domain=list(range(1955,2020,5))),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
# size='citation_count',
# color="decade:O",
opacity=alt.condition(selection, alt.value(.8), alt.value(0.2)),
).add_selection(
selection
).interactive()
###Output
_____no_output_____
###Markdown
Let's look at the visualization. Each dot represents one paper. Close dots mean that the respective papers are more closely related than distant ones. The color indicates the 5-year period in which the paper was published. The dot size indicates the citation count (within the S2ORC corpus) as of July 2020. The view is **interactive** and allows for three main interactions. Try them and play around.1. hover over a dot to see a tooltip (title, author)2. select a year in the legend (right) to filter dots3. zoom in/out with scroll; double click resets the view
###Code
chart
###Output
_____no_output_____
###Markdown
QuestionsBy playing around, can you find some answers to the following questions?1. Can you find topical clusters? What cluster might occur because of a filtering error?2. Can you see a temporal trend in the data and clusters?3. Can you determine when deep learning methods started booming?4. Can you find the key papers that were written before the DL "winter" that define milestones for a cluster? (tip: look for large dots of a different color) MethodsHere is what we did:1. Filtered all papers that fulfilled the criteria: - are categorized as `Computer Science` or `Mathematics` - one of the following keywords appears in the title or abstract: `"machine learning|artificial intelligence|neural network|(machine|computer) vision|perceptron|network architecture| RNN | CNN | LSTM | BLEU | MNIST | CIFAR |reinforcement learning|gradient descent| Imagenet "`2. Per year, removed all papers that are below the 99th percentile of citation count in that year3. Embedded each paper using abstract+title with the SPECTER model4. Projected the embeddings to 2D using UMAP5. Visualized using Altair Find Authors
###Code
# @title Edit the `AUTHOR_FILTER` variable to full text search for authors.
AUTHOR_FILTER = "Rush " # @param space at the end means "word border"
### Don't ignore case when searching...
FLAGS = 0
### uncomment to ignore case
# FLAGS = re.IGNORECASE
## --- FILTER CODE.. make it your own ---
import re
data['issel'] = data['authors'].str.contains(AUTHOR_FILTER, na=False, flags=FLAGS, )
if data['issel'].mean()<0.0000000001:
print('No match found')
## --- FROM HERE ON VIS CODE ---
alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count", "issel"]], width=800,
height=800) \
.mark_circle(stroke="black", strokeOpacity=1).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.StrokeWidth('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[0, 2]), legend=None),
alt.Opacity('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[.2, 1]), legend=None),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
).interactive()
###Output
_____no_output_____
###Markdown
Tutorial 1: PyTorch**Week 1, Day 1: Basics and PyTorch****By Neuromatch Academy**__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording__Content reviewers:__ Deepak Raya, Siwei Bai, Kelson Shilling-Scrivo__Content editors:__ Anoop Kulkarni, Spiros Chavlis__Production editors:__ Arush Tagade, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** --- Tutorial ObjectivesWe have a few specific objectives for this tutorial:* Learn about PyTorch and tensors* Tensor manipulations* Data loading* GPUs and CUDA tensors* Train NaiveNet* Get to know your pod* Start thinking about the course as a whole
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- Setup Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy), set global or environment variables, and load in helper functions for things like plotting. In some tutorials, you will notice that we install some dependencies even if they are preinstalled on Google Colab or Kaggle. This happens because we have added automation to our repository through [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions).Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.If you start building your own projects on this code base, we highly recommend looking at them in more detail.
###Code
# @title Install dependencies
!pip install pandas --quiet
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
# @title Figure Settings
import ipywidgets as widgets
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# @title Helper Functions
atform = AirtableForm('appn7VdPRseSoMXEG','W1D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/97e94a29-0b3a-4e16-9a8d-f6838a5bd83d')
def checkExercise1(A, B, C, D):
"""
Helper function for checking exercise.
Args:
A: torch.Tensor
B: torch.Tensor
C: torch.Tensor
D: torch.Tensor
Returns:
Nothing.
"""
errors = []
# TODO better errors and error handling
if not torch.equal(A.to(int),torch.ones(20, 21).to(int)):
errors.append(f"Got: {A} \n Expected: {torch.ones(20, 21)} (shape: {torch.ones(20, 21).shape})")
if not np.array_equal( B.numpy(),np.vander([1, 2, 3], 4)):
errors.append("B is not a tensor containing the elements of Z ")
if C.shape != (20, 21):
errors.append("C is not the correct shape ")
if not torch.equal(D, torch.arange(4, 41, step=2)):
errors.append("D does not contain the correct elements")
if errors == []:
print("All correct!")
else:
[print(e) for e in errors]
def timeFun(f, dim, iterations, device='cpu'):
iterations = iterations
t_total = 0
for _ in range(iterations):
start = time.time()
f(dim, device)
end = time.time()
t_total += end - start
if device == 'cpu':
print(f"time taken for {iterations} iterations of {f.__name__}({dim}, {device}): {t_total:.5f}")
else:
print(f"time taken for {iterations} iterations of {f.__name__}({dim}, {device}): {t_total:.5f}")
###Output
_____no_output_____
###Markdown
**Important note: Google Colab users***Scratch Code Cells*If you want to quickly try something out or take a look at the data, you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook. To open a new scratch cell, go to *Insert* → *Scratch code cell*. Section 1: Welcome to Neuromatch Deep learning course*Time estimate: ~25mins*
###Code
# @title Video 1: Welcome and History
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing
atform.add_event('Video 1: Welcome and History')
display(out)
###Output
_____no_output_____
###Markdown
This will be an intensive 3-week adventure. We will all learn Deep Learning. In a group. Groups need standards. Read our [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing).
###Code
# @title Video 2: Why DL is cool
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 2: Why DL is cool')
display(out)
###Output
_____no_output_____
###Markdown
**Describe what you hope to get out of this course in about 100 words.** --- Section 2: The Basics of PyTorch*Time estimate: ~2 hours 05 mins* PyTorch is a Python-based scientific computing package targeted at two sets of audiences:- A replacement for NumPy to use the power of GPUs- A deep learning platform that provides significant flexibility and speed. At its core, PyTorch provides a few key features:- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.- An optimized **autograd** engine for automatically computing derivatives (a tiny taste below).- A clean, modular API for building and deploying **deep learning models**. You can find more information about PyTorch in the appendix. Section 2.1: Creating Tensors
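As a tiny taste of that autograd engine (covered properly tomorrow; a minimal sketch):
```python
import torch

w = torch.tensor(2.0, requires_grad=True)  # track operations on w
loss = (3 * w) ** 2                        # loss = 9 * w**2
loss.backward()                            # autograd computes d(loss)/dw
print(w.grad)                              # 18 * w = tensor(36.)
```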
###Code
# @title Video 3: Making Tensors
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 3: Making Tensors')
display(out)
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so. **Construct tensors directly:**---
###Code
# we can construct a tensor directly from some common python iterables,
# such as list and tuple; nested iterables can also be handled, as long
# as the dimensions make sense
# tensor from a list
a = torch.tensor([0, 1, 2])
# tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
###Output
_____no_output_____
###Markdown
**Some common tensor constructors:**---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1, 5)
print(f"Tensor x: {x}")
print(f"Tensor y: {y}")
print(f"Tensor z: {z}")
###Output
_____no_output_____
###Markdown
Notice that ```.empty()``` does not return zeros, but arbitrary values: whatever happened to be in the uninitialised memory. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence a bit faster if you only need a tensor that you will overwrite anyway. **Creating random tensors and tensors like other tensors:**---
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
print(f"Tensor d: {d}")
###Output
_____no_output_____
###Markdown
*Reproducibility*:
- PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA)
```python
import torch
torch.manual_seed(0)
```
- For custom operators, you might need to set the Python seed as well:
```python
import random
random.seed(0)
```
- Random number generators in other libraries:
```python
import numpy as np
np.random.seed(0)
```
Here, we define for you a function called `set_seed` that does the job for you!
###Code
def set_seed(seed=None, seed_torch=True):
"""
Function that controls randomness. NumPy and random modules must be imported.
Args:
seed : Integer
A non-negative integer that defines the random state. Default is `None`.
seed_torch : Boolean
If `True` sets the random seed for pytorch tensors, so pytorch module
must be imported. Default is `True`.
Returns:
Nothing.
"""
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
###Output
_____no_output_____
###Markdown
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
###Code
def simplefun(seed=True, my_seed=None):
if seed:
set_seed(seed=my_seed)
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
print("Tensor a: ", a)
print("Tensor b: ", b)
simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed`
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**---The ```.arange()``` and ```.linspace()``` behave as you would expect them to if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Creating TensorsBelow you will find some incomplete code. Fill in the missing code to construct the specified tensors.We want the tensors: $A:$ 20 by 21 tensor consisting of ones$B:$ a tensor with elements equal to the elements of numpy array $Z$$C:$ a tensor with the same number of elements as $A$ but with values $\sim U(0,1)$$D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive.
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## from the first expression
raise NotImplementedError("Student exercise: say what they should have done")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
```All correct!``` Section 2.2: Operations in PyTorch
###Code
# @title Video 4: Tensor Operators
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 4: Tensor Operators')
display(out)
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
# this only works if c and d already exist
torch.add(a, b, out=c)
#Pointwise Multiplication of a and b
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overridden. The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations
###Code
x = torch.tensor([1, 2, 4, 8])
y = torch.tensor([1, 2, 3, 4])
x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation
###Output
_____no_output_____
###Markdown
**Tensor Methods** Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!). All of these operations should have similar syntax to their numpy equivalents. (Feel free to skip if you already know this!)
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
###Output
_____no_output_____
###Markdown
**Matrix Operations**The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot products, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.T```. Note the lack of brackets for ```Tensor.T``` - it is an attribute, not a method. Coding Exercise 2.2 : Simple tensor operationsBelow are two expressions involving operations on matrices. $$ \textbf{A} = \begin{bmatrix}2 &4 \\5 & 7 \end{bmatrix} \begin{bmatrix} 1 &1 \\2 & 3\end{bmatrix} + \begin{bmatrix}10 & 10 \\ 12 & 1 \end{bmatrix} $$and$$ b = \begin{bmatrix} 3 \\ 5 \\ 7\end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8\end{bmatrix}$$The code block below, which computes these expressions using PyTorch, is incomplete - fill in the missing lines.
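Before the exercise, a quick illustration of the operations just mentioned (a minimal sketch, separate from the exercise):
```python
X = torch.arange(6).reshape(2, 3)
print(X.t())                 # transpose via the method
print(X.T)                   # transpose via the attribute (note: no brackets)
print(torch.matmul(X, X.T))  # matrix multiplication, equivalent to X @ X.T
```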
###Code
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
################################################
## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
################################################
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = ...
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# A = simple_operations(a1, a2, a3)
# print(A)
# to_remove solution
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = a1 @ a2 + a3
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
A = simple_operations(a1, a2, a3)
print(A)
###Output
_____no_output_____
###Markdown
```tensor([[20, 24], [31, 27]])```
###Code
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
###############################################
## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
# Use torch.dot() to compute the dot product of two tensors
product = ...
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# b = dot_product(b1, b2)
# print(b)
# to_remove solution
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
# Use torch.dot() to compute the dot product of two tensors
product = torch.dot(b1, b2)
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
b = dot_product(b1, b2)
print(b)
###Output
_____no_output_____
###Markdown
```tensor(82)``` Section 2.3 Manipulating Tensors in Pytorch
###Code
# @title Video 5: Tensor Indexing
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 5: Tensor Indexing')
display(out)
###Output
_____no_output_____
###Markdown
**Indexing**Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0, and ranges include the first element but exclude the last. We can access elements by their position relative to the end of the list using negative indices. Indexing is also referred to as slicing. For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as numpy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
###Output
_____no_output_____
###Markdown
**Flatten and reshape**There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")
# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")
# and back to 2D
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
###Output
_____no_output_____
###Markdown
You will also see the ```.view()``` method used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()```. The documentation can be found in the appendix. **Squeezing tensors**When processing batches of data, you will quite often be left with singleton dimensions, e.g., [1, 10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there... In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.
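Before the squeezing demo, here is a quick illustration of that subtle ```.view()``` vs ```.reshape()``` difference (a minimal sketch): ```.view()``` requires the underlying memory to be contiguous, while ```.reshape()``` copies when it has to.
```python
x = torch.arange(6).reshape(2, 3)
xt = x.t()                      # transposing makes the tensor non-contiguous
print(xt.reshape(6))            # works: .reshape() copies when necessary
# xt.view(6)                    # would raise a RuntimeError: .view() needs contiguous memory
print(xt.contiguous().view(6))  # works after making a contiguous copy
```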
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print(f"x[0]: {x[0]}")
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, x[0] gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")
# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")
# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
###Output
_____no_output_____
###Markdown
**Permutation**Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension i.e. [48x64x3]. To get around this we can use ```.permute()```
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
###Output
_____no_output_____
###Markdown
You may also see ```.transpose()``` used. This works in a similar way to permute, but can only swap two dimensions at once. **Concatenation** In the example below, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor’s axis-0 length (6) is the sum of the two input tensors’ axis-0 lengths (3+3), while the second output tensor’s axis-1 length (8) is the sum of the two input tensors’ axis-1 lengths (4+4).
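A one-line illustration of ```.transpose()``` before we get to the concatenation example (a minimal sketch):
```python
x = torch.rand(3, 48, 64)
print(x.transpose(0, 2).shape)  # swaps dims 0 and 2 -> torch.Size([64, 48, 3])
```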
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
#concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by colums: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
###Markdown
**Conversion to Other Python Objects**Converting a tensor to a NumPy array, or vice versa, is easy. Note that for CPU tensors ```Tensor.numpy()``` returns an array that *shares* memory with the tensor, so an in-place change to one is visible in the other, whereas ```torch.tensor()``` always copies the data it is given. When converting to a numpy array, the information being tracked by the tensor will be lost, i.e., the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
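A small check of the memory behaviour described above (a minimal sketch):
```python
t = torch.zeros(3)
arr = t.numpy()           # shares memory with t (CPU tensors only)
t.add_(1)                 # in-place change to the tensor...
print(arr)                # ...is visible in the array: [1. 1. 1.]
copy = torch.tensor(arr)  # torch.tensor() copies the data
t.add_(1)
print(copy)               # unchanged: tensor([1., 1., 1.])
```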
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the item function or Python’s built-in functions.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.3: Manipulating TensorsUsing a combination of the methods discussed above, complete the functions below. **Function A** This function takes in two 2D tensors $A$ and $B$ and returns the column sum of $A$ multiplied by the sum of all the elements of $B$, i.e., a scalar, e.g.,:$ A = \begin{bmatrix}1 & 1 \\1 & 1 \end{bmatrix} \,$and$ B = \begin{bmatrix}1 & 2 & 3\\1 & 2 & 3 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix} 2 & 2 \\\end{bmatrix} \cdot 12 = \begin{bmatrix}24 & 24\\\end{bmatrix}$**Function B** This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.,:$ C = \begin{bmatrix}2 & 3 \\-1 & 10 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix}0 & 2 \\1 & 3 \\2 & -1 \\3 & 10\end{bmatrix}$**Hint:** pay close attention to singleton dimensions**Function C**This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $D$-shaped $E$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors, e.g.,:$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix} \,$and $ E = \begin{bmatrix}2 & 3 & 0 & 2 \\\end{bmatrix} \, $so$ \, Out = \begin{bmatrix}3 & 2 \\-1 & 5 \end{bmatrix}$$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$and$ \, E = \begin{bmatrix}2 & 3 & 0 \\\end{bmatrix} \,$so$ \, Out = \begin{bmatrix}1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix}$**Hint:** `torch.numel()` is an easy way of finding the number of elements in a tensor
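A quick illustration of the ```torch.numel()``` hint (a minimal sketch):
```python
x = torch.ones(2, 3)
print(torch.numel(x))  # 6, the total number of elements
print(x.numel())       # method form, same result
```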
###Code
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
################################################
## TODO for students: complete functionA
raise NotImplementedError("Student exercise: complete function A")
################################################
# TODO multiply the column sum of `my_tensor1` by the sum of `my_tensor2`
output = ...
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function B")
################################################
# TODO flatten the tensor `my_tensor`
my_tensor = ...
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionC
raise NotImplementedError("Student exercise: complete function C")
################################################
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if ...:
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
my_tensor1 = ...
my_tensor2 = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
# to_remove solution
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
# TODO multiply the column sum of `my_tensor1` by the sum of `my_tensor2`
output = my_tensor1.sum(axis=0) * my_tensor2.sum()
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO flatten the tensor `my_tensor`
my_tensor = my_tensor.flatten()
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = torch.arange(0, len(my_tensor))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), my_tensor.unsqueeze(1)], axis=1)
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if torch.numel(my_tensor1) == torch.numel(my_tensor2):
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = my_tensor2.reshape(my_tensor1.shape)
# TODO sum the two tensors
output = my_tensor1 + my_tensor2
else:
# TODO flatten both tensors
my_tensor1 = my_tensor1.reshape(1, -1)
my_tensor2 = my_tensor2.reshape(1, -1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([my_tensor1, my_tensor2], axis=1).squeeze()
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
###Output
_____no_output_____
###Markdown
```
tensor([24, 24])
tensor([[ 0,  2],
        [ 1,  3],
        [ 2, -1],
        [ 3, 10]])
tensor([[ 3,  2],
        [-1,  5]])
tensor([ 1, -1, -1,  3,  2,  3,  0])
```
Section 2.4: GPUs
###Code
# @title Video 6: GPU vs CPU
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 6: GPU vs CPU')
display(out)
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
When using Colab notebooks, you will by default not have access to a GPU. In order to start using GPUs we need to request one. We can do this by going to the Runtime tab at the top of the page. By following Runtime -> Change runtime type and selecting "GPU" from the Hardware Accelerator dropdown list, we can start playing with sending tensors to GPUs. Once you have done this your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell. (For more information on the GPU usage policy, see the appendix.) **Now we have a GPU** The cell below should return True.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction, and allows us to launch CUDA kernels using pure Python. In short, we get the power of parallelising our tensor computations on GPUs, whilst only writing (relatively) simple Python!Here, we define the function `set_device`, which returns the device used in the notebook, i.e., `cpu` or `cuda`. Unless otherwise specified, we use this function at the top of every tutorial, and we store the device variable like this:
```python
DEVICE = set_device()
```
Let's define the function using the PyTorch package `torch.cuda`, which is lazily initialized, so we can always import it, and use `is_available()` to determine if our system supports CUDA.
###Code
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
###Output
_____no_output_____
###Markdown
Let's make some CUDA tensors!
###Code
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between cpu tensors and cuda tensors**Note that the type of the tensor changed after calling ```.to()```. What happens if we try to perform operations on tensors that live on different devices?
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
# Uncomment the following line and run this cell
# z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the `.to()` method as before, or the `.cpu()` and `.cuda()` methods. Note that using `.cuda()` will throw an error if CUDA is not enabled on your machine. Generally in this course all deep learning is done on the GPU, while other computation is done on the CPU, so sometimes we have to pass things back and forth, as you'll see below.
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)
# moving to cpu
x = x.to("cpu") # alternatively, you can use x = x.cpu()
print(x + y)
# moving to gpu
y = y.to(DEVICE) # alternatively, you can use y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.4: Just how much faster are GPUs?Below is a simple function `simpleFun`. Complete this function, such that it performs the operations:- elementwise multiplication- matrix multiplicationIt should be possible to perform the operations on either the CPU or the GPU, as specified by the parameter `device`. We will use the helper function `timeFun(f, dim, iterations, device)`.
###Code
dim = 10000
iterations = 1
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda:0"
Returns:
Nothing.
"""
###############################################
## TODO for students: complete the function below, ensuring
## all computation happens on the specified device
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
###############################################
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
x = ...
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
y = ...
# 2D tensor filled with the scalar value 2, dim x dim
z = ...
# elementwise multiplication of x and y
a = ...
# matrix multiplication of x and y
b = ...
del x
del y
del z
del a
del b
## TODO: Implement the function above and uncomment the following lines to test your code
# timeFun(f=simpleFun, dim=dim, iterations=iterations)
# timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
# to_remove solution
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
x = torch.rand(dim, dim).to(device)
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
y = torch.rand_like(x).to(device)
# 2D tensor filled with the scalar value 2, dim x dim
z = 2*torch.ones(dim, dim).to(device)
# elementwise multiplication of x and y
a = x * y
# matrix multiplication of x and y
b = x @ z
del x
del y
del z
del a
del b
## TODO: Implement the function above and uncomment the following lines to test your code
timeFun(f=simpleFun, dim=dim, iterations=iterations)
timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
###Output
_____no_output_____
###Markdown
Sample output (depends on your hardware)
```
time taken for 1 iterations of simpleFun(10000): 28.50481
time taken for 1 iterations of simpleFun(10000): 0.91102
```
**Discuss!**Try reducing the dimensions of the tensors and increasing the iterations. You can get to a point where the CPU-only function is faster than the GPU function. Why might this be? Section 2.5: Datasets and Dataloaders
###Code
# @title Video 7: Getting Data
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 7: Getting Data')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
###Code
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
###Output
_____no_output_____
###Markdown
**Datasets**The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print(f"Number of samples: {len(cifar10_data)}")
print(f"Class names: {cifar10_data.classes}")
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
# Choose a random sample
random.seed(2021)
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]
print(f"Label: {cifar10_data.classes[label]}")
print(f"Image size: {image.shape}")
###Output
_____no_output_____
###Markdown
Color images are modeled as 3 dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimensions is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W. Coding Exercise 2.5: Display an image from the datasetLet's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - $H \times W \times C$.You need to reorder the dimensions of the tensor using the `permute` method of the tensor. PyTorch `torch.permute(*dims)` rearranges the original tensor according to the desired ordering and returns a new multidimensional rotated tensor. The size of the returned tensor remains the same as that of the original.**Code hint:**
```python
# create a tensor of size 2 x 4
input_var = torch.randn(2, 4)
# print its size and the tensor
print(input_var.size())
print(input_var)
# dimensions permuted
input_var = input_var.permute(1, 0)
# print its size and the permuted tensor
print(input_var.size())
print(input_var)
```
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
# plt.show()
# to_remove solution
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1, 2, 0))
plt.show()
# @title Video 8: Train and Test
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 8: Train and Test')
display(out)
###Output
_____no_output_____
###Markdown
**Training and Test Datasets**When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, today we will not use both datasets separately, but this topic will be addressed in the coming days.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
# @title Video 9: Data Augmentation - Transformations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 9: Data Augmentation - Transformations')
display(out)
###Output
_____no_output_____
###Markdown
**Dataloader**Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with a batch size of 64 and shuffling enabled
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
*Reproducibility:* DataLoader will reseed workers following the "Randomness in multi-process data loading" algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:
```python
def seed_worker(worker_id):
    worker_seed = torch.initial_seed() % 2**32
    numpy.random.seed(worker_seed)
    random.seed(worker_seed)

g_seed = torch.Generator()
g_seed.manual_seed(my_seed)

DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    worker_init_fn=seed_worker,
    generator=g_seed
)
```
**Note:** For `seed_worker` to have an effect, `num_workers` should be 2 or more. We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then query the next batch using the function `next`. We will see that we have a 4D tensor. This is because we have 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
plt.show()
###Output
_____no_output_____
###Markdown
**Transformations**Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation, etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform. Check out the [pytorch documentation](https://pytorch.org/vision/stable/transforms.html) for details. Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale imagesThe goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility.
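Before you start, here is a small illustration of composing transforms (not the solution to the exercise; the transforms are chosen purely for illustration):
```python
from torchvision.transforms import Compose, Normalize, RandomHorizontalFlip
# chain several transforms; they are applied in order
augment = Compose([
    RandomHorizontalFlip(p=0.5),                  # random augmentation
    ToTensor(),                                   # PIL image -> float tensor in [0, 1]
    Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # shift/scale each channel to [-1, 1]
])
# this could then be passed to a dataset, e.g. datasets.CIFAR10(root="data", transform=augment)
```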
###Code
def my_data_load():
###############################################
## TODO for students: load the CIFAR10 data with a transform
## that converts the images to grayscale tensors
raise NotImplementedError("Student exercise: fill in the missing code to load the data")
###############################################
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(...,
transform=...)
# Display a random grayscale image
image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# data = my_data_load()
# to_remove solution
def my_data_load():
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(root="data", download=True,
transform=Compose([ToTensor(), Grayscale()]))
# Display a random grayscale image
image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
data = my_data_load()
###Output
_____no_output_____
###Markdown
--- Section 3: Neural Networks*Time estimate: ~1 hour 30 mins (excluding movie)* Now it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:- Creating a simple neural network model- Training the network- Visualizing the results of the network- Tweaking the network
###Code
# @title Video 10: CSV Files
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 10: CSV Files')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Data LoadingFirst we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
# @title Generate sample data
# @markdown we used `scikit-learn` module
from sklearn.datasets import make_moons
# Create a dataset of 256 points with a little noise
X, y = make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")
# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
plt.show()
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Initialize the device variable
DEVICE = set_device()
# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)
# Upload the tensor to the device
X = X.to(DEVICE)
print(f"Size X:{X.shape}")
# Convert the labels to a long integer tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(DEVICE)
print(f"Size y:{y.shape}")
###Output
_____no_output_____
###Markdown
Section 3.2: Create a Simple Neural Network
###Code
# @title Video 11: Generating the Neural Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 11: Generating the Neural Network')
display(out)
###Output
_____no_output_____
###Markdown
For this example we want to have a simple neural network consisting of 3 layers:- 1 input layer of size 2 (our points have 2 coordinates)- 1 hidden layer of size 16 (you can play with different numbers here)- 1 output layer of size 2 (we want to have the scores for the two classes)During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.**Programming the Network**PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:`__init__`In the `__init__` method you need to define the structure of your network. Here you will specify which layers the network will consist of, which activation functions will be used, etc.`forward`All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it.`predict`This is not an obligatory method of a neural network module, but it is a good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score.`train`This is also not an obligatory method, but it is a good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook.> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`.
###Code
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
nn.ReLU(), # Activation function (ReLU): a widely used non-linearity that is cheap to compute;
# it returns 0 for any negative input and passes any positive value x through unchanged.
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Choose the most likely label predicted by the network
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
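# (Note: defining `train` here shadows nn.Module's built-in train(mode) method,
# which toggles training/evaluation mode - that's fine for this simple demo.)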
def train(self, X, y):
pass
###Output
_____no_output_____
###Markdown
**Check that your network works**
Create an instance of your model and visualize it.
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(DEVICE)
# Print the structure of the network
print(model)
###Output
_____no_output_____
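###Markdown
As noted above, calling the module directly invokes `forward`. Here is a minimal check (not part of the original notebook) using the `model` and `X` defined above:
###Code
# net(x) and net.forward(x) are equivalent: __call__ dispatches to forward
sample = X[0:3]
print(torch.equal(model(sample), model.forward(sample)))  # expected: True
###Output
_____no_output_____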
###Markdown
Coding Exercise 3.2: Classify some samples
Now let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. The goal here is just to get some experience with the data structures that are passed to the `forward` and `predict` methods and their results.
###Code
## Get the samples
# X_samples = ...
# print("Sample input:\n", X_samples)
## Do a forward pass of the network
# output = ...
# print("\nNetwork output:\n", output)
## Predict the label of each point
# y_predicted = ...
# print("\nPredicted labels:\n", y_predicted)
# to_remove solution
## Get the samples
X_samples = X[0:5]
print("Sample input:\n", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("\nNetwork output:\n", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("\nPredicted labels:\n", y_predicted)
###Output
_____no_output_____
###Markdown
```
Sample input:
 tensor([[ 0.9066,  0.5052],
        [-0.2024,  1.1226],
        [ 1.0685,  0.2809],
        [ 0.6720,  0.5097],
        [ 0.8548,  0.5122]], device='cuda:0')

Network output:
 tensor([[ 0.1543, -0.8018],
        [ 2.2077, -2.9859],
        [-0.5745, -0.0195],
        [ 0.1924, -0.8367],
        [ 0.1818, -0.8301]], device='cuda:0', grad_fn=<AddmmBackward>)

Predicted labels:
 tensor([0, 0, 1, 0, 0], device='cuda:0')
```
Section 3.3: Train Your Neural Network
###Code
# @title Video 12: Train the Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 12: Train the Network')
display(out)
###Output
_____no_output_____
###Markdown
Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet - we will cover training in much more detail in the next days. For now, the goal is just to see your network in action!
You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we implement it as a function outside of the class in order to have it in a separate cell.
###Code
# @title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
# We'll use the whole dataset during the training instead of using batches
# in order to keep the code simple for now.
y_logits = model.forward(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(model, X, y, DEVICE)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Create a new network instance and train it
model = NaiveNet().to(DEVICE)
losses = train(model, X, y)
###Output
_____no_output_____
###Markdown
**Plot the loss during training**
Plot the loss over the course of training to see how it decreases and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
# @title Visualize the training process
# @markdown ### Execute this cell!
!pip install imageio --quiet
!pip install pathlib --quiet
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(10):
filename = f"frames/{i * 1000:05d}.png"  # matches the '{:05d}.png' pattern used by plt.savefig above
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
# @title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 13: Play with it')
display(out)
###Output
_____no_output_____
###Markdown
Exercise 3.3: Tweak your Network
You can now play around with the network a little bit to get a feeling for what different parameters are doing. Here are some ideas of what you could try:
- Increase or decrease the number of epochs for training
- Increase or decrease the size of the hidden layer
- Add one additional hidden layer
Can you get the network to better fit the data?
###Code
# @title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 14: XOR Widget')
display(out)
###Output
_____no_output_____
###Markdown
The exclusive OR (XOR) logical operation gives a true (`1`) output when the number of true inputs is odd. That is, a true output results if one, and only one, of the inputs to the gate is true. If both inputs are false (`0`) or both are true, a false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false.
In the case of two inputs ($X$ and $Y$) the following truth table applies:
\begin{array}{ccc}
X & Y & \text{XOR} \\
\hline
0 & 0 & 0 \\
0 & 1 & 1 \\
1 & 0 & 1 \\
1 & 1 & 0 \\
\end{array}
Here, with `0` we denote `False`, and with `1` we denote `True` in boolean terms.
Interactive Demo 3.3: Solving XOR
Here we use an open source and famous visualization widget developed by the TensorFlow team, available [here](https://github.com/tensorflow/playground).
* Play with the widget and observe that, with no hidden layer, you cannot solve the continuous XOR dataset.
* Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly.
For the second part, you should set the weights by clicking on the connections and either typing the value or using the up and down keys to change it by one increment. You can do the same for the biases by clicking on the tiny square at each neuron's bottom left.
Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is:
\begin{equation}
y = f(x_1)+f(x_2)-f(x_1+x_2)
\end{equation}
Try to set the weights and biases to implement this function after you have played enough :) A quick numerical check of this identity appears right after the widget below.
###Code
# @markdown ###Play with the parameters to solve XOR
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor®Dataset=reg-plane&learningRate=0.03®ularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
# @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
###Output
_____no_output_____
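###Markdown
As a quick sanity check of the ReLU identity above, we can evaluate it on the four XOR input combinations. This is a minimal sketch (not part of the original demo); it assumes the two inputs are encoded as $\pm 1$, as in the widget's dataset:
###Code
# Check that y = relu(x1) + relu(x2) - relu(x1 + x2) implements XOR
# when False is encoded as -1 and True as +1
relu = torch.relu
for x1 in (-1.0, 1.0):
  for x2 in (-1.0, 1.0):
    y = relu(torch.tensor(x1)) + relu(torch.tensor(x2)) - relu(torch.tensor(x1 + x2))
    print(f"x1={x1:+.0f}, x2={x2:+.0f} -> y={y.item():.0f}")
###Output
_____no_output_____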
###Markdown
--- Section 4: Ethics And Course Info
###Code
# @title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 17: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Meet our lecturers:
Week 1: the building blocks
* [Konrad Kording](https://kordinglab.com)
* [Andrew Saxe](https://www.saxelab.org/)
* [Surya Ganguli](https://ganguli-gang.stanford.edu/)
* [Ioannis Mitliagkas](http://mitliagkas.github.io/)
* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/)
Week 2: making things work
* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)
* [Alexander Ecker](https://eckerlab.org/)
* [James Evans](https://sociology.uchicago.edu/directory/james-evans)
* [He He](https://hhexiy.github.io/)
* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/)
Week 3: more magic
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Jane Wang](http://www.janexwang.com/) and [Feryal Behbahani](https://feryal.github.io/)
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lomonaco](https://www.vincenzolomonaco.com/)
Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map?
---
Submit to Airtable
###Code
# @title Video 18: Submission info
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Tutorial 1: PyTorch
**Week 1, Day 1: Basics and PyTorch**
**By Neuromatch Academy**
__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording
__Content reviewers:__ Kelson Shilling-Scrivo, Deepak Raya, Siwei Bai
__Content editors:__ Anoop Kulkarni, Spiros Chavlis
__Production editors:__ Arush Tagade, Spiros Chavlis
**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
---
Tutorial Objectives
We have a few specific objectives for this tutorial:
* Learn about PyTorch and tensors
* Tensor manipulations
* Data loading
* GPUs and CUDA tensors
* Train NaiveNet
* Get to know your pod
* Start thinking about the course as a whole
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
---
Setup
Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells import the required Python packages (e.g., PyTorch, NumPy), set global or environment variables, and load helper functions for things like plotting. In some tutorials, you will notice that we install some dependencies even if they are preinstalled on Google Colab or Kaggle. This happens because we have added automation to our repository through [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions).
Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.
If you start building your own projects on this code base, we highly recommend looking at these cells in more detail.
###Code
# @title Install dependencies
!pip install pandas --quiet
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from evaltools.airtable import AirtableForm
# @title Figure Settings
import ipywidgets as widgets
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# @title Helper Functions
atform = AirtableForm('appn7VdPRseSoMXEG','W1D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/97e94a29-0b3a-4e16-9a8d-f6838a5bd83d')
def checkExercise1(A, B, C, D):
"""
Helper function for checking exercise.
Args:
A: torch.Tensor
B: torch.Tensor
C: torch.Tensor
D: torch.Tensor
Returns:
Nothing.
"""
errors = []
# TODO better errors and error handling
if not torch.equal(A.to(int),torch.ones(20, 21).to(int)):
errors.append(f"Got: {A} \n Expected: {torch.ones(20, 21)} (shape: {torch.ones(20, 21).shape})")
if not np.array_equal( B.numpy(),np.vander([1, 2, 3], 4)):
errors.append("B is not a tensor containing the elements of Z ")
if C.shape != (20, 21):
errors.append("C is not the correct shape ")
if not torch.equal(D, torch.arange(4, 41, step=2)):
errors.append("D does not contain the correct elements")
if errors == []:
print("All correct!")
else:
[print(e) for e in errors]
def timeFun(f, dim, iterations, device='cpu'):
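# Note: CUDA kernel launches are asynchronous, so simple wall-clock timing like
# this can be imprecise for GPU runs (torch.cuda.synchronize() would give tighter
# numbers); it is good enough for the rough CPU-vs-GPU comparison below.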
t_total = 0
for _ in range(iterations):
start = time.time()
f(dim, device)
end = time.time()
t_total += end - start
print(f"time taken for {iterations} iterations of {f.__name__}({dim}): {t_total:.5f}")
###Output
_____no_output_____
###Markdown
**Important note: Google Colab users**
*Scratch Code Cells*
If you want to quickly try out something or take a look at the data, you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook.
To open a new scratch cell, go to *Insert* → *Scratch code cell*.
Section 1: Welcome to the Neuromatch Deep Learning course
###Code
# @title Video 1: Welcome and History
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing
atform.add_event('Video 1: Welcome and History')
display(out)
###Output
_____no_output_____
###Markdown
This will be an intensive 3-week adventure. We will all learn deep learning. In a group. Groups need standards. Read our [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing).
###Code
# @title Video 2: Why DL is cool
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 2: Why DL is cool')
display(out)
###Output
_____no_output_____
###Markdown
**Describe what you hope to get out of this course in about 100 words.**
---
Section 2: The Basics of PyTorch
PyTorch is a Python-based scientific computing package targeted at two sets of audiences:
- A replacement for NumPy that uses the power of GPUs
- A deep learning platform that provides significant flexibility and speed
At its core, PyTorch provides a few key features:
- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to a [NumPy array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.
- An optimized **autograd** engine for automatically computing derivatives.
- A clean, modular API for building and deploying **deep learning models**.
You can find more information about PyTorch in the appendix.
Section 2.1: Creating Tensors
###Code
# @title Video 3: Making Tensors
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 3: Making Tensors')
display(out)
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so.
**Construct tensors directly:**
---
###Code
# We can construct a tensor directly from some common python iterables,
# such as lists and tuples. Nested iterables can also be handled, as long
# as the dimensions make sense.
# tensor from a list
a = torch.tensor([0, 1, 2])
#tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
###Output
_____no_output_____
###Markdown
**Some common tensor constructors:**
---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1, 5)
print(f"Tensor x: {x}")
print(f"Tensor y: {y}")
print(f"Tensor z: {z}")
###Output
_____no_output_____
###Markdown
Notice that ```.empty()``` does not return zeros, but seemingly random small numbers. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence a bit faster if you are looking to just create a tensor.
**Creating random tensors and tensors like other tensors:**
---
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
print(f"Tensor d: {d}")
###Output
_____no_output_____
###Markdown
*Reproducibility*:
- PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA):
```python
import torch
torch.manual_seed(0)
```
- For custom operators, you might need to set the Python seed as well:
```python
import random
random.seed(0)
```
- Random number generators in other libraries:
```python
import numpy as np
np.random.seed(0)
```
Here, we define for you a function called `set_seed` that does the job for you!
###Code
def set_seed(seed=None, seed_torch=True):
"""
Function that controls randomness. NumPy and random modules must be imported.
Args:
seed : Integer
A non-negative integer that defines the random state. Default is `None`.
seed_torch : Boolean
If `True` sets the random seed for pytorch tensors, so pytorch module
must be imported. Default is `True`.
Returns:
Nothing.
"""
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
###Output
_____no_output_____
###Markdown
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
###Code
def simplefun(seed=True, my_seed=None):
if seed:
set_seed(seed=my_seed)
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
print("Tensor a: ", a)
print("Tensor b: ", b)
simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed`
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**
---
The ```.arange()``` and ```.linspace()``` methods behave how you would expect them to if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Creating Tensors
Below you will find some incomplete code. Fill in the missing code to construct the specified tensors.
We want the tensors:
- $A$: 20 by 21 tensor consisting of ones
- $B$: a tensor with elements equal to the elements of numpy array $Z$
- $C$: a tensor with the same number of elements as $A$ but with values $\sim U(0,1)$
- $D$: a 1D tensor containing the even numbers between 4 and 40 inclusive
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## to create the tensors A, B, C and D below
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
```All correct!```
Section 2.2: Operations in PyTorch
###Code
# @title Video 4: Tensor Operators
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 4: Tensor Operators')
display(out)
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
# this only works if c and d already exist
torch.add(a, b, out=c)
#Pointwise Multiplication of a and b
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overridden. The common standard arithmetic operators (`+`, `-`, `*`, `/`, and `**`) have all been lifted to elementwise operations.
###Code
x = torch.tensor([1, 2, 4, 8])
y = torch.tensor([1, 2, 3, 4])
x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation
###Output
_____no_output_____
###Markdown
**Tensor Methods**
Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!). All of these operations should have similar syntax to their numpy equivalents. (Feel free to skip if you already know this!)
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
###Output
_____no_output_____
###Markdown
**Matrix Operations**
The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot multiplication, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.T```. Note the lack of brackets for ```Tensor.T``` - it is an attribute, not a method. (A short demonstration of these operations follows, just before the exercise.)
Coding Exercise 2.2: Simple tensor operations
Below are two expressions involving operations on matrices.
$$ \textbf{A} = \begin{bmatrix} 2 & 4 \\ 5 & 7 \end{bmatrix} \begin{bmatrix} 1 & 1 \\ 2 & 3 \end{bmatrix} + \begin{bmatrix} 10 & 10 \\ 12 & 1 \end{bmatrix} $$
and
$$ b = \begin{bmatrix} 3 \\ 5 \\ 7 \end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8 \end{bmatrix} $$
The code block below, which computes these expressions using PyTorch, is incomplete - fill in the missing lines.
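###Markdown
First, the short demonstration promised above - a minimal sketch using fresh example tensors (`m` and `n` are illustration values, not the exercise matrices):
###Code
m = torch.tensor([[1., 2.], [3., 4.]])
n = torch.tensor([[0., 1.], [1., 0.]])
print(m @ n)               # matrix multiplication via the @ operator
print(torch.matmul(m, n))  # the same product via torch.matmul()
print(m.t())               # transpose via the .t() method
print(m.T)                 # the same transpose via the .T attribute
print(torch.dot(torch.tensor([1., 2.]), torch.tensor([3., 4.])))  # dot product of 1D tensors
###Output
_____no_output_____
###Markdown
Now fill in the exercise below.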
###Code
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
################################################
## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
################################################
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = ...
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# A = simple_operations(a1, a2, a3)
# print(A)
# to_remove solution
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = a1 @ a2 + a3
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
A = simple_operations(a1, a2, a3)
print(A)
###Output
_____no_output_____
###Markdown
```
tensor([[20, 24],
        [31, 27]])
```
###Code
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
###############################################
## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
# Use torch.dot() to compute the dot product of two tensors
product = ...
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# b = dot_product(b1, b2)
# print(b)
# to_remove solution
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
# Use torch.dot() to compute the dot product of two tensors
product = torch.dot(b1, b2)
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
b = dot_product(b1, b2)
print(b)
###Output
_____no_output_____
###Markdown
```tensor(82)```
Section 2.3: Manipulating Tensors in PyTorch
###Code
# @title Video 5: Tensor Indexing
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 5: Tensor Indexing')
display(out)
###Output
_____no_output_____
###Markdown
**Indexing**
Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0, and ranges include the first element of the range but exclude the last. We can access elements according to their relative position to the end of the list by using negative indices. Indexing with ranges is also referred to as slicing.
For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] selects all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as numpy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
###Output
_____no_output_____
###Markdown
**Flatten and reshape**
There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")
# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")
# and back to 2D
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
###Output
_____no_output_____
###Markdown
You will also see the ```.view()``` method used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()``` (a short demonstration of the difference follows below). The documentation can be found in the appendix.
**Squeezing tensors**
When processing batches of data, you will quite often be left with singleton dimensions, e.g., [1, 10] or [256, 1, 3]. These dimensions can quite easily mess up your matrix operations if you don't plan on them being there...
In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.
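###Markdown
First, the promised look at the ```.view()``` vs ```.reshape()``` difference - a minimal sketch with a fresh example tensor: ```.view()``` requires the underlying memory to be contiguous, while ```.reshape()``` copies when necessary.
###Code
v = torch.arange(6).reshape(2, 3)
vt = v.t()            # the transpose is a non-contiguous view of v
print(vt.reshape(6))  # works: reshape makes a copy when needed
try:
  print(vt.view(6))   # raises an error: view cannot handle non-contiguous input
except RuntimeError as err:
  print(f"view failed: {err}")
###Output
_____no_output_____
###Markdown
Now, squeezing in action: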
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print(f"x[0]: {x[0]}")
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, x[0] gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")
# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")
# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
###Output
_____no_output_____
###Markdown
**Permutation**
Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension, i.e., [48x64x3]. To get around this we can use ```.permute()```.
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
###Output
_____no_output_____
###Markdown
You may also see ```.transpose()``` used. This works in a similar way to permute, but can only swap two dimensions at once (see the one-line demo below).
**Concatenation**
In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor's axis-0 length (6) is the sum of the two input tensors' axis-0 lengths (3 + 3), while the second output tensor's axis-1 length (8) is the sum of the two input tensors' axis-1 lengths (4 + 4).
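###Markdown
A one-line look at ```.transpose()``` (a minimal sketch; `x` here is a fresh example tensor):
###Code
x = torch.rand(3, 48, 64)
print(x.transpose(0, 2).shape)  # swaps dims 0 and 2 -> torch.Size([64, 48, 3])
###Output
_____no_output_____
###Markdown
And now the concatenation example: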
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
#concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by columns: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
###Markdown
**Conversion to Other Python Objects**
Converting to a NumPy array, or vice versa, is easy. Note the memory semantics, though: for a CPU tensor, `tensor.numpy()` returns an array that *shares* memory with the tensor, while `torch.tensor(array)` makes a copy that does not share memory. Copying is a minor inconvenience but actually quite important: when you perform operations on the CPU or on GPUs, you do not want to halt computation, waiting to see whether the NumPy package of Python might want to be doing something else with the same chunk of memory. (A small check of this behaviour follows the code below.)
When converting to a numpy array, the information being tracked by the tensor will be lost, i.e., the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
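###Markdown
A quick check of the memory-sharing behaviour described above (a minimal sketch with a fresh example tensor):
###Code
x = torch.zeros(3)
y = x.numpy()        # shares memory with x (CPU tensors only)
z = torch.tensor(y)  # makes an independent copy
x[0] = 7.0
print(y)  # reflects the in-place change to x
print(z)  # the copy is unchanged
###Output
_____no_output_____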
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the `item()` method or Python's built-in conversion functions.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.3: Manipulating Tensors
Using a combination of the methods discussed above, complete the functions below.
**Function A**
This function takes in two 2D tensors $A$ and $B$ and returns the column sum of $A$ multiplied by the sum of all the elements of $B$ (a scalar), e.g.:
$$ A = \begin{bmatrix} 1 & 1 \\ 1 & 1 \end{bmatrix} \quad\text{and}\quad B = \begin{bmatrix} 1 & 2 & 3 \\ 1 & 2 & 3 \end{bmatrix}, \quad\text{so}\quad Out = \begin{bmatrix} 2 & 2 \end{bmatrix} \cdot 12 = \begin{bmatrix} 24 & 24 \end{bmatrix} $$
**Function B**
This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.:
$$ C = \begin{bmatrix} 2 & 3 \\ -1 & 10 \end{bmatrix}, \quad\text{so}\quad Out = \begin{bmatrix} 0 & 2 \\ 1 & 3 \\ 2 & -1 \\ 3 & 10 \end{bmatrix} $$
**Hint:** pay close attention to singleton dimensions.
**Function C**
This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $E$ reshaped into the shape of $D$, and $D$; otherwise it returns a 1D tensor that is the concatenation of the two tensors, e.g.:
$$ D = \begin{bmatrix} 1 & -1 \\ -1 & 3 \end{bmatrix} \quad\text{and}\quad E = \begin{bmatrix} 2 & 3 & 0 & 2 \end{bmatrix}, \quad\text{so}\quad Out = \begin{bmatrix} 3 & 2 \\ -1 & 5 \end{bmatrix} $$
$$ D = \begin{bmatrix} 1 & -1 \\ -1 & 3 \end{bmatrix} \quad\text{and}\quad E = \begin{bmatrix} 2 & 3 & 0 \end{bmatrix}, \quad\text{so}\quad Out = \begin{bmatrix} 1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix} $$
**Hint:** `torch.numel()` is an easy way of finding the number of elements in a tensor.
###Code
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
################################################
## TODO for students: complete functionA
raise NotImplementedError("Student exercise: complete function A")
################################################
# TODO multiplication the sum of the tensors
output = ...
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function B")
################################################
# TODO flatten the tensor `my_tensor`
my_tensor = ...
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor2` reshaped into the shape of `my_tensor1`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionC
raise NotImplementedError("Student exercise: complete function C")
################################################
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if ...:
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
my_tensor1 = ...
my_tensor2 = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
# to_remove solution
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
# TODO multiplication the sum of the tensors
output = my_tensor1.sum(axis=0) * my_tensor2.sum()
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO flatten the tensor `my_tensor`
my_tensor = my_tensor.flatten()
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = torch.arange(0, len(my_tensor))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), my_tensor.unsqueeze(1)], axis=1)
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor2` reshaped into the shape of `my_tensor1`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if torch.numel(my_tensor1) == torch.numel(my_tensor2):
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = my_tensor2.reshape(my_tensor1.shape)
# TODO sum the two tensors
output = my_tensor1 + my_tensor2
else:
# TODO flatten both tensors
my_tensor1 = my_tensor1.reshape(1, -1)
my_tensor2 = my_tensor2.reshape(1, -1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([my_tensor1, my_tensor2], axis=1).squeeze()
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
###Output
_____no_output_____
###Markdown
```
tensor([24, 24])
tensor([[ 0,  2],
        [ 1,  3],
        [ 2, -1],
        [ 3, 10]])
tensor([[ 3,  2],
        [-1,  5]])
tensor([ 1, -1, -1,  3,  2,  3,  0])
```
Section 2.4: GPUs
###Code
# @title Video 6: GPU vs CPU
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 6: GPU vs CPU')
display(out)
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
When using Colab notebooks, you will not have access to a GPU by default. In order to start using GPUs we need to request one. We can do this by going to the Runtime tab at the top of the page. By following Runtime → Change runtime type and selecting "GPU" from the Hardware accelerator dropdown list, we can start playing with sending tensors to GPUs.
Once you have done this, your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell.
(For more information on the GPU usage policy, see the appendix.)
**Now we have a GPU**
The cell below should return `True`.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction, and allows us to launch CUDA kernels using pure Python.
In short, we get the power of parallelising our tensor computations on GPUs, whilst only writing (relatively) simple Python!
Here, we define the function `set_device`, which returns the device used in the notebook, i.e., `cpu` or `cuda`. Unless otherwise specified, we use this function on top of every tutorial, and we store the device variable such as:
```python
DEVICE = set_device()
```
Let's define the function using the PyTorch package `torch.cuda`, which is lazily initialized, so we can always import it, and use `is_available()` to determine if our system supports CUDA.
###Code
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
###Output
_____no_output_____
###Markdown
Let's make some CUDA tensors!
###Code
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between CPU tensors and CUDA tensors**
Note that the type of the tensor changed after calling ```.to()```. What happens if we try to perform operations on tensors that live on different devices?
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
# Uncomment the following line and run this cell
# z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine CUDA tensors and CPU tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the `.to()` method as before, or the `.cpu()` and `.cuda()` methods. Note that using `.cuda()` will throw an error if CUDA is not enabled on your machine.
Generally in this course all deep learning is done on the GPU, while other computation is done on the CPU, so sometimes we have to pass things back and forth - you'll see us call these methods regularly.
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)
# moving to cpu
x = x.to("cpu") # alternatively, you can use x = x.cpu()
print(x + y)
# moving to gpu
y = y.to(DEVICE) # alternatively, you can use y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.4: Just how much faster are GPUs?
Below is an incomplete function. Complete it so that it performs several tensor operations entirely on a given device, so we can compare running it on the CPU versus the GPU. We will use the helper function `timeFun(f, dim, iterations, device)` to time it.
###Code
dim = 10000
iterations = 1
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda:0"
Returns:
Nothing.
"""
###############################################
## TODO for students: recreate the above function, but
## ensure all computation happens on the GPU
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
###############################################
x = ...
y = ...
z = ...
x = ...
y = ...
del x
del y
del z
## TODO: Implement the function above and uncomment the following lines to test your code
# timeFun(f=simpleFun, dim=dim, iterations=iterations)
# timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
# to_remove solution
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
x = torch.rand(dim, dim).to(device)
y = torch.rand_like(x).to(device)
z = 2*torch.ones(dim, dim).to(device)
x = x * y
x = x @ z
del x
del y
del z
## TODO: Implement the function above and uncomment the following lines to test your code
timeFun(f=simpleFun, dim=dim, iterations=iterations)
timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
###Output
_____no_output_____
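###Markdown
For very small tensors, the fixed cost of launching CUDA kernels can outweigh the parallel speedup, which is worth keeping in mind for the discussion below. A minimal hedged sketch (assuming `time`, `torch`, and `DEVICE` from above; if no GPU is available, both runs are on the CPU):
###Code
# time a tiny matrix multiply on the CPU vs. the chosen device;
# for such small sizes the CPU often wins due to kernel-launch overhead
for dev in ["cpu", DEVICE]:
  a = torch.rand(8, 8, device=dev)
  start = time.time()
  for _ in range(1000):
    b = a @ a
  if torch.cuda.is_available():
    torch.cuda.synchronize()  # wait for queued GPU work before reading the clock
  print(f"{dev}: {time.time() - start:.5f} s")
###Output
_____no_output_____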
###Markdown
Sample output (depends on your hardware)

```
time taken for 1 iterations of simpleFun(10000): 28.50481
time taken for 1 iterations of simpleFunGPU(10000): 0.91102
```

**Discuss!**

Try to reduce the dimensions of the tensors and increase the iterations. You can get to a point where the CPU-only function is faster than the GPU function. Why might this be?

Section 2.5: Datasets and Dataloaders
###Code
# @title Video 7: Getting Data
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 7: Getting Data')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
###Code
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
###Output
_____no_output_____
###Markdown
**Datasets**

The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.

Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print(f"Number of samples: {len(cifar10_data)}")
print(f"Class names: {cifar10_data.classes}")
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
# Choose a random sample
random.seed(2021)
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]
print(f"Label: {cifar10_data.classes[label]}")
print(f"Image size: {image.shape}")
###Output
_____no_output_____
###Markdown
Color images are modeled as 3 dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimension is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W.

Coding Exercise 2.5: Display an image from the dataset

Let's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - $H \times W \times C$.

You need to reorder the dimensions of the tensor using the `permute` method of the tensor. PyTorch `torch.permute(*dims)` rearranges the dimensions of the original tensor according to the desired ordering and returns a new tensor with the dimensions permuted. The size of the returned tensor remains the same as that of the original.

**Code hint:**

```python
# create a tensor of size 2 x 4
input_var = torch.randn(2, 4)

# print its size and the tensor
print(input_var.size())
print(input_var)

# dimensions permuted
input_var = input_var.permute(1, 0)

# print its size and the permuted tensor
print(input_var.size())
print(input_var)
```
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
# plt.show()
# to_remove solution
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1, 2, 0))
plt.show()
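# A hedged side note (not part of the exercise): permute returns a view that
# shares memory with the original tensor, so no data is copied
t = torch.arange(6).reshape(2, 3)
v = t.permute(1, 0)
print(v.shape)  # torch.Size([3, 2])
print(v.data_ptr() == t.data_ptr())  # True: same underlying storage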
#@title Video 8: Train and Test
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 8: Train and Test')
display(out)
###Output
_____no_output_____
###Markdown
**Training and Test Datasets**

When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, today we will not use both datasets separately, but this topic will be addressed in the next days.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
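# Quick sanity check (a small addition, assuming the cells above ran):
# the standard CIFAR10 split has 50000 training and 10000 test samples
print(f"Training samples: {len(training_data)}")
print(f"Test samples: {len(test_data)}")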
# @title Video 9: Data Augmentation - Transformations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 9: Data Augmentation - Transformations')
display(out)
###Output
_____no_output_____
###Markdown
**Dataloader**

Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with batch size 64 and shuffling enabled
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
*Reproducibility:* DataLoader will reseed workers following the "Randomness in multi-process data loading" algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:

```python
def seed_worker(worker_id):
  worker_seed = torch.initial_seed() % 2**32
  numpy.random.seed(worker_seed)
  random.seed(worker_seed)

g_seed = torch.Generator()
g_seed.manual_seed(my_seed)

DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    worker_init_fn=seed_worker,
    generator=g_seed
    )
```

**Note:** For the `seed_worker` to have an effect, `num_workers` should be 2 or more.

We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then we can query the next batch using the function `next`.

We can now see that we have a 4D tensor. This is because we have 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
plt.show()
###Output
_____no_output_____
###Markdown
**Transformations**

Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation, etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform. Check out the [pytorch documentation](https://pytorch.org/vision/stable/transforms.html) for details.

Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale images

The goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility.
###Code
def my_data_load():
###############################################
  ## TODO for students: load the CIFAR10 data,
  ## converting the images to grayscale tensors
raise NotImplementedError("Student exercise: fill in the missing code to load the data")
###############################################
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(...,
transform=...)
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# data = my_data_load()
# to_remove solution
def my_data_load():
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(root="data", download=True,
transform=Compose([ToTensor(), Grayscale()]))
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
data = my_data_load()
###Output
_____no_output_____
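###Markdown
Transforms can be chained further; for example, grayscale conversion could be combined with normalization. A hedged sketch (the mean/std values here are illustrative, not official CIFAR10 statistics):
###Code
from torchvision.transforms import Normalize

# convert to tensor, collapse to one channel, then normalize that channel
example_transform = Compose([
    ToTensor(),
    Grayscale(),
    Normalize(mean=[0.5], std=[0.5]),  # illustrative values
])
print(example_transform)
###Output
_____no_output_____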
###Markdown
---

Section 3: Neural Networks

Now it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:

- Creating a simple neural network model
- Training the network
- Visualizing the results of the network
- Tweaking the network
###Code
# @title Video 10: CSV Files
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 10: CSV Files')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Data Loading

First we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
# @title Generate sample data
# @markdown we used `scikit-learn` module
from sklearn.datasets import make_moons
# Create a dataset of 256 points with a little noise
X, y = make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")
# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
plt.show()
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**

Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Initialize the device variable
DEVICE = set_device()
# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)
# Upload the tensor to the device
X = X.to(DEVICE)
print(f"Size X:{X.shape}")
# Convert the labels to a long integer tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(DEVICE)
print(f"Size y:{y.shape}")
###Output
_____no_output_____
###Markdown
Section 3.2: Create a Simple Neural Network
###Code
# @title Video 11: Generating the Neural Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 11: Generating the Neural Network')
display(out)
###Output
_____no_output_____
###Markdown
For this example we want to have a simple neural network consisting of 3 layers:

- 1 input layer of size 2 (our points have 2 coordinates)
- 1 hidden layer of size 16 (you can play with different numbers here)
- 1 output layer of size 2 (we want to have the scores for the two classes)

During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.

**Programming the Network**

PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:

`__init__`

In the `__init__` method you need to define the structure of your network. Here you will specify what layers the network will consist of, what activation functions will be used etc.

`forward`

All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it.

`predict`

This is not an obligatory method of a neural network module, but it is a good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score.

`train`

This is also not an obligatory method, but it is a good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook.

> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`.
###Code
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
nn.ReLU(), # Activation function (ReLU) is a non-linearity which is widely used because it reduces computation. The function returns 0 if it receives any
# negative input, but for any positive value x, it returns that value back.
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Choose the most likely label predicted by the network
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
def train(self, X, y):
pass
###Output
_____no_output_____
###Markdown
**Check that your network works**

Create an instance of your model and visualize it.
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(DEVICE)
# Print the structure of the network
print(model)
###Output
_____no_output_____
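###Markdown
Printing the module shows its layers, but not its size. A small hedged sketch counting the trainable parameters (assuming `model` from the cell above):
###Code
# each nn.Linear stores a weight matrix and a bias vector;
# for this network: (2*16 + 16) + (16*2 + 2) = 82 parameters
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Number of trainable parameters: {n_params}")
###Output
_____no_output_____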
###Markdown
Coding Exercise 3.2: Classify some samples

Now let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. The goal here is just to get some experience with the data structures that are passed to the `forward` and `predict` methods and their results.
###Code
## Get the samples
# X_samples = ...
# print("Sample input:\n", X_samples)
## Do a forward pass of the network
# output = ...
# print("\nNetwork output:\n", output)
## Predict the label of each point
# y_predicted = ...
# print("\nPredicted labels:\n", y_predicted)
# to_remove solution
## Get the samples
X_samples = X[0:5]
print("Sample input:\n", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("\nNetwork output:\n", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("\nPredicted labels:\n", y_predicted)
###Output
_____no_output_____
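###Markdown
The raw outputs of `forward` are unnormalized scores (logits). As a small hedged aside, you can turn them into probabilities with a softmax (assuming `output` from the solution above):
###Code
# softmax along dimension 1 turns each row of scores into class probabilities
probabilities = torch.softmax(output, dim=1)
print(probabilities)
print(probabilities.sum(axis=1))  # each row sums to 1
###Output
_____no_output_____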
###Markdown
Sample output of the solution above:

```
Sample input:
 tensor([[ 0.9066,  0.5052],
        [-0.2024,  1.1226],
        [ 1.0685,  0.2809],
        [ 0.6720,  0.5097],
        [ 0.8548,  0.5122]], device='cuda:0')

Network output:
 tensor([[ 0.1543, -0.8018],
        [ 2.2077, -2.9859],
        [-0.5745, -0.0195],
        [ 0.1924, -0.8367],
        [ 0.1818, -0.8301]], device='cuda:0', grad_fn=<AddmmBackward>)

Predicted labels:
 tensor([0, 0, 1, 0, 0], device='cuda:0')
```

Section 3.3: Train Your Neural Network
###Code
# @title Video 12: Train the Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 12: Train the Network')
display(out)
###Output
_____no_output_____
###Markdown
Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet - we will cover training in much more detail in the next days. For now, the goal is just to see your network in action!

You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we will implement it as a function outside of the class in order to have it in a separate cell.
###Code
# @title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
  # Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
# We'll use the whole dataset during the training instead of using batches
    # in order to keep the code simple for now.
y_logits = model.forward(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(model, X, y, DEVICE)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Create a new network instance a train it
model = NaiveNet().to(DEVICE)
losses = train(model, X, y)
###Output
_____no_output_____
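###Markdown
Before plotting the loss, here is a quick hedged check of how well the trained network fits the training data (assuming `model`, `X` and `y` from above):
###Code
# fraction of training points whose predicted label matches the true label
accuracy = (model.predict(X) == y).float().mean().item()
print(f"Training accuracy: {accuracy:.3f}")
###Output
_____no_output_____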
###Markdown
**Plot the loss during training**

Plot the loss during the training to see how it reduces and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
# @title Visualize the training process
# @markdown ### Execute this cell!
!pip install imageio --quiet
!pip install pathlib --quiet
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(10):
filename = "frames/0"+str(i)+"000.png"
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
# @title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 13: Play with it')
display(out)
###Output
_____no_output_____
###Markdown
Exercise 3.3: Tweak your Network

You can now play around with the network a little bit to get a feeling for what different parameters are doing. Here are some ideas for what you could try:

- Increase or decrease the number of epochs for training
- Increase or decrease the size of the hidden layer
- Add one additional hidden layer

Can you get the network to better fit the data?
###Code
# @title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 14: XOR Widget')
display(out)
###Output
_____no_output_____
###Markdown
Exclusive OR (XOR) logical operation gives a true (`1`) output when the number of true inputs is odd. That is, a true output results if one, and only one, of the inputs to the gate is true. If both inputs are false (`0`) or both are true (`1`), a false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false.

In case of two inputs ($X$ and $Y$) the following truth table is applied:

\begin{array}{ccc}
X & Y & \text{XOR} \\
\hline
0 & 0 & 0 \\
0 & 1 & 1 \\
1 & 0 & 1 \\
1 & 1 & 0 \\
\end{array}

Here, with `0`, we denote `False`, and with `1` we denote `True` in boolean terms.

Interactive Demo 3.3: Solving XOR

Here we use an open source and famous visualization widget developed by the Tensorflow team, available [here](https://github.com/tensorflow/playground).

* Play with the widget and observe that you can not solve the continuous XOR dataset.
* Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly.

For the second part, you should set the weights by clicking on the connections and either typing the value or using the up and down keys to change it by one increment. You can do the same for the biases by clicking on the tiny square to each neuron's bottom left.

Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is:

\begin{equation}
y = f(x_1)+f(x_2)-f(x_1+x_2)
\end{equation}

Try to set the weights and biases to implement this function after you played enough :)
###Code
# @markdown ###Play with the parameters to solve XOR
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor&regDataset=reg-plane&learningRate=0.03&regularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
# @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
###Output
_____no_output_____
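###Markdown
As a quick numerical check of the ReLU construction above, here is a hedged sketch evaluating $y = f(x_1)+f(x_2)-f(x_1+x_2)$ on the four quadrant centers of the continuous XOR dataset:
###Code
# with inputs encoded as -1/+1, the expression is positive exactly
# when the two signs differ, i.e., it implements XOR
f = torch.relu
points = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]])
y_xor = f(points[:, 0]) + f(points[:, 1]) - f(points[:, 0] + points[:, 1])
print(y_xor)  # tensor([0., 1., 1., 0.])
###Output
_____no_output_____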
###Markdown
---

Section 4: Ethics And Course Info
###Code
# @title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 17: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Meet our lecturers:

Week 1: the building blocks
* [Konrad Kording](https://kordinglab.com)
* [Andrew Saxe](https://www.saxelab.org/)
* [Surya Ganguli](https://ganguli-gang.stanford.edu/)
* [Ioannis Mitliagkas](http://mitliagkas.github.io/)
* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/)

Week 2: making things work
* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)
* [Alexander Ecker](https://eckerlab.org/)
* [James Evans](https://sociology.uchicago.edu/directory/james-evans)
* [He He](https://hhexiy.github.io/)
* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/)

Week 3: more magic
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Jane Wang](http://www.janexwang.com/) and [Feryal Behbahani](https://feryal.github.io/)
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lomonaco](https://www.vincenzolomonaco.com/)

Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map?

---

Submit to Airtable
###Code
# @title Video 18: Submission info
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This is Darryl, the Deep Learning Dapper Lion, and he's here to teach you about content submission to Airtable.

At the end of each tutorial there will be an Airtable Submission Cell. Run the cell to generate the Airtable submission button and click on it to submit your information to Airtable. If it is the last tutorial of the day, the button will take you to the end-of-day survey.

It is critical that you push the submit button for every tutorial you run. Even if you don't finish the tutorial, still submit! Submitting is the only way we can verify that you attempted each tutorial, which is critical for the award of your completion certificate at the end of the course.

Finally, we try to keep the Airtable code as hidden as possible, but if you ever see any calls to `atform` such as `atform.add_event()` in the coding exercises, just know that is for saving Airtable information only. It will not affect the code that is being run around it in any way, so please do not modify, comment out, or worry about any of those lines of code.

Now, let's try submitting today's course to Airtable by running the next cell and clicking the button when it appears.
###Code
# @title Airtable Submission Link
from IPython import display
display.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link to survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
---

Bonus - 60 years of Machine Learning Research in one Plot

by [Hendrik Strobelt](http://hendrik.strobelt.com) (MIT-IBM Watson AI Lab) with support from Benjamin Hoover.

In this notebook we visualize a subset* of 3,300 articles retrieved from the AllenAI [S2ORC dataset](https://github.com/allenai/s2orc). We represent each paper by a position that is the output of a dimensionality reduction method applied to a vector representation of each paper. The vector representation is the output of a neural network.

*The selection is very biased by the keywords and methodology we used to filter. Please see the details section to learn about what we did.
###Code
# @title Import `altair` and load the data
!pip install altair vega_datasets --quiet
import altair as alt  # altair is a library for defining data visualizations
# Source data files
# Position data file maps ID to x,y positions
POS_FILE = 'http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc.pos_umap_cosine_100_d0.1.json'
# Metadata file maps ID to title, abstract, author,....
META_FILE = 'http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc_clean.csv'
# data loading and wrangling
def load_data():
positions = pd.read_json(POS_FILE)
positions[['x', 'y']] = positions['pos'].to_list()
meta = pd.read_csv(META_FILE)
return positions.merge(meta, left_on='id', right_on='paper_id')
# load data
data = load_data()
# @title Define Visualization using Altair
YEAR_PERIOD = "quinquennial" # @param
selection = alt.selection_multi(fields=[YEAR_PERIOD], bind='legend')
data[YEAR_PERIOD] = (data["year"] / 5.0).apply(np.floor) * 5
chart = alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count"]], width=800,
height=800).mark_circle(radius=2, opacity=0.2).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False, clamp=True, domain=list(range(1955,2020,5))),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
# size='citation_count',
# color="decade:O",
opacity=alt.condition(selection, alt.value(.8), alt.value(0.2)),
).add_selection(
selection
).interactive()
###Output
_____no_output_____
###Markdown
Let's look at the visualization. Each dot represents one paper. Close dots mean that the respective papers are more closely related than distant ones. The color indicates the 5-year period of when the paper was published. The dot size indicates the citation count (within the S2ORC corpus) as of July 2020.

The view is **interactive** and allows for three main interactions. Try them and play around.

1. Hover over a dot to see a tooltip (title, author)
2. Select a year in the legend (right) to filter dots
3. Zoom in/out with scroll -- double click resets view
###Code
chart
###Output
_____no_output_____
###Markdown
Questions

By playing around, can you find some answers to the following questions?

1. Can you find topical clusters? What cluster might occur because of a filtering error?
2. Can you see a temporal trend in the data and clusters?
3. Can you determine when deep learning methods started booming?
4. Can you find the key papers that were written before the DL "winter" that define milestones for a cluster? (tip: look for large dots of different color)

Methods

Here is what we did:

1. Filtering of all papers that fulfilled the criteria:
   - are categorized as `Computer Science` or `Mathematics`
   - one of the following keywords appearing in title or abstract: `"machine learning|artificial intelligence|neural network|(machine|computer) vision|perceptron|network architecture| RNN | CNN | LSTM | BLEU | MNIST | CIFAR |reinforcement learning|gradient descent| Imagenet "`
2. Per year, remove all papers that are below the 99 percentile of citation count in that year
3. Embed each paper by using abstract+title in the SPECTER model
4. Project based on embedding using UMAP
5. Visualize using Altair

Find Authors
###Code
# @title Edit the `AUTHOR_FILTER` variable to full text search for authors.
AUTHOR_FILTER = "Rush " # @param space at the end means "word border"
### Don't ignore case when searching...
FLAGS = 0
### uncomment to ignore case
# FLAGS = re.IGNORECASE
## --- FILTER CODE.. make it your own ---
import re
data['issel'] = data['authors'].str.contains(AUTHOR_FILTER, na=False, flags=FLAGS, )
if data['issel'].mean()<0.0000000001:
print('No match found')
## --- FROM HERE ON VIS CODE ---
alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count", "issel"]], width=800,
height=800) \
.mark_circle(stroke="black", strokeOpacity=1).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.StrokeWidth('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[0, 2]), legend=None),
alt.Opacity('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[.2, 1]), legend=None),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
).interactive()
###Output
_____no_output_____
###Markdown
Tutorial 1: PyTorch

**Week 1, Day 1: Basics and PyTorch**

**By Neuromatch Academy**

__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording

__Content reviewers:__ Kelson Shilling-Scrivo, Deepak Raya, Siwei Bai

__Content editors:__ Anoop Kulkarni, Spiros Chavlis

__Production editors:__ Arush Tagade, Spiros Chavlis

**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**

---

Tutorial Objectives

We have a few specific objectives for this tutorial:
* Learn about PyTorch and tensors
* Tensor Manipulations
* Data Loading
* GPUs and Cuda Tensors
* Train NaiveNet
* Get to know your pod
* Start thinking about the course as a whole
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- Setup Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy); set global or environment variables, and load in helper functions for things like plotting.Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.If you start building your own projects built on this code base we highly recommend looking at them in more detail.
###Code
# @title Install dependencies
!pip install pandas --quiet
!pip install -U scikit-learn --quiet
# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
# @title Figure Settings
import ipywidgets as widgets
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# @title Helper Functions
def checkExercise1(A, B, C, D):
"""
Helper function for checking exercise.
Args:
A: torch.Tensor
B: torch.Tensor
C: torch.Tensor
D: torch.Tensor
Returns:
Nothing.
"""
errors = []
#TODO better errors and error handling
if not torch.equal(A.to(int),torch.ones(20,21).to(int)):
errors.append(f"Got: {A} \n Expected: {torch.ones(20,21)} (shape: {torch.ones(20,21).shape})")
if not np.array_equal( B.numpy(),np.vander([1,2,3], 4)):
errors.append("B is not a tensor containing the elements of Z ")
if C.shape != (20,21):
errors.append("C is not the correct shape ")
if not torch.equal(D,torch.arange(4,41,step=2)):
errors.append("D does not contain the correct elements")
if errors == []:
print("All correct!")
else:
[print(e) for e in errors]
def timeFun(f, dim, iterations, device):
iterations = iterations
t_total = 0
for _ in range(iterations):
start = time.time()
f(dim, device)
end = time.time()
t_total += end - start
print(f"time taken for {iterations} iterations of {f.__name__}({dim}): {t_total:.5f}")
###Output
_____no_output_____
###Markdown
**Important note: Scratch Code Cells**

If you want to quickly try out something or take a look at the data you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook.

To open a new scratch cell go to *Insert* → *Scratch code cell*.

Section 1: Welcome to Neuromatch Deep learning course
###Code
# @title Video 1: Welcome and History
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This will be an intensive 3-week adventure. We will all learn Deep Learning. In a group. Groups need standards. Read our [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing).
###Code
# @title Video 2: Why DL is cool
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
**Describe what you hope to get out of this course in about 100 words.**

---

Section 2: The Basics of PyTorch

PyTorch is a Python-based scientific computing package targeted at two sets of audiences:

- A replacement for NumPy to use the power of GPUs
- A deep learning platform that provides significant flexibility and speed

At its core, PyTorch provides a few key features:

- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.
- An optimized **autograd** engine for automatically computing derivatives.
- A clean, modular API for building and deploying **deep learning models**.

You can find more information about PyTorch in the appendix.

Section 2.1: Creating Tensors
###Code
# @title Video 3: Making Tensors
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so.

**Construct tensors directly:**

---
###Code
# we can construct a tensor directly from some common python iterables,
# such as list and tuple nested iterables can also be handled as long as the
# dimensions make sense
# tensor from a list
a = torch.tensor([0, 1, 2])
#tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print("Tensor a:", a)
print("Tensor b:", b)
print("Tensor c:", c)
###Output
_____no_output_____
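###Markdown
One subtlety worth knowing when constructing tensors from numpy arrays: `torch.tensor()` copies the data, whereas `torch.from_numpy()` shares memory with the array. A minimal sketch:
###Code
arr = np.zeros(3)
t_copy = torch.tensor(arr)       # copies the data
t_share = torch.from_numpy(arr)  # shares memory with arr
arr[0] = 7
print(t_copy)   # unchanged: tensor([0., 0., 0.], dtype=torch.float64)
print(t_share)  # reflects the change: tensor([7., 0., 0.], dtype=torch.float64)
###Output
_____no_output_____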
###Markdown
**Some common tensor constructors:**---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1, 5)
print("Tensor x:", x)
print("Tensor y:", y)
print("Tensor z:", z)
###Output
_____no_output_____
###Markdown
Notice that ```.empty()``` does not return zeros, but seemingly random small numbers. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence a bit faster if you are looking to just create a tensor.

**Creating random tensors and tensors like other tensors:**

---
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print("Tensor a: ", a)
print("Tensor b: ", b)
print("Tensor c: ", c)
print("Tensor d: ", d)
###Output
_____no_output_____
###Markdown
*Reproducibility*:

- PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA)

```python
import torch
torch.manual_seed(0)
```

- For custom operators, you might need to set the Python seed as well:

```python
import random
random.seed(0)
```

- Random number generators in other libraries:

```python
import numpy as np
np.random.seed(0)
```

Here, we define for you a function called `set_seed` that does the job for you!
###Code
def set_seed(seed=None, seed_torch=True):
"""
Function that controls randomness. NumPy and random modules must be imported.
Args:
seed : Integer
A non-negative integer that defines the random state. Default is `None`.
seed_torch : Boolean
If `True` sets the random seed for pytorch tensors, so pytorch module
      must be imported. Default is `True`.
Returns:
Nothing.
"""
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
###Output
_____no_output_____
###Markdown
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
###Code
def simplefun(seed=True, my_seed=None):
if seed:
set_seed(seed=my_seed)
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
print("Tensor a: ", a)
print("Tensor b: ", b)
simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed`
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**

---

The ```.arange()``` and ```.linspace()``` behave how you would expect them to if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Creating Tensors

Below you will find some incomplete code. Fill in the missing code to construct the specified tensors.

We want the tensors:

$A$: 20 by 21 tensor consisting of ones

$B$: a tensor with elements equal to the elements of numpy array $Z$

$C$: a tensor with the same number of elements as $A$ but with values $\sim U(0,1)$

$D$: a 1D tensor containing the even numbers between 4 and 40 inclusive.
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
    Z (numpy.ndarray): An array of shape (3, 4)
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## from the first expression
raise NotImplementedError("Student exercise: say what they should have done")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
    Z (numpy.ndarray): An array of shape (3, 4)
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
```
All correct!
```

Section 2.2: Operations in PyTorch
###Code
# @title Video 4: Tensor Operators
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
# this only works if c and d already exist
torch.add(a, b, out=c)
#Pointwise Multiplication of a and b
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overridden. The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations.
###Code
x = torch.tensor([1, 2, 4, 8])
y = torch.tensor([1, 2, 3, 4])
x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation
###Output
_____no_output_____
###Markdown
**Tensor Methods** Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!) All of these operations should have similar syntax to their numpy equivalents.(Feel free to skip if you already know this!)
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
###Output
_____no_output_____
###Markdown
**Matrix Operations**The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot products, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.T```. Note the lack of brackets for ```Tensor.T``` - it is an attribute, not a method. Coding Exercise 2.2: Simple tensor operationsBelow are two expressions involving operations on matrices. $$ \textbf{A} = \begin{bmatrix}2 &4 \\5 & 7 \end{bmatrix} \begin{bmatrix} 1 &1 \\2 & 3\end{bmatrix} + \begin{bmatrix}10 & 10 \\ 12 & 1 \end{bmatrix} $$and$$ b = \begin{bmatrix} 3 \\ 5 \\ 7\end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8\end{bmatrix}$$The code block below, which computes these expressions using PyTorch, is incomplete - fill in the missing lines.
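**Hint:** here is a minimal sketch of these operators in action (the tensors below are illustrative, not the exercise's):

```python
import torch

M = torch.arange(6.).reshape(2, 3)
# matrix multiplication of M (2x3) with its transpose (3x2) gives a 2x2 result
print((M @ M.T).shape)               # torch.Size([2, 2])
print(torch.matmul(M, torch.t(M)))   # the same computation, written with torch functions

v = torch.tensor([1., 2., 3.])
w = torch.tensor([4., 5., 6.])
print(torch.dot(v, w))               # 1*4 + 2*5 + 3*6 = tensor(32.)
```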
###Code
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
################################################
## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
################################################
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = ...
return answer
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# A = simple_operations(a1, a2, a3)
# print(A)
# to_remove solution
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = a1 @ a2 + a3
return answer
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
A = simple_operations(a1, a2, a3)
print(A)
###Output
_____no_output_____
###Markdown
```
tensor([[20, 24],
        [31, 27]])
```
###Code
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
###############################################
## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
# Use torch.dot() to compute the dot product of two tensors
product = ...
return product
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# b = dot_product(b1, b2)
# print(b)
# to_remove solution
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
# Use torch.dot() to compute the dot product of two tensors
product = torch.dot(b1, b2)
return product
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
b = dot_product(b1, b2)
print(b)
###Output
_____no_output_____
###Markdown
```tensor(82)``` Section 2.3: Manipulating Tensors in PyTorch
###Code
# @title Video 5: Tensor Indexing
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
**Indexing**Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0 and ranges are specified to include the first element but exclude the last. We can access elements according to their relative position to the end of the list by using negative indices. Indexing is also referred to as slicing. For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as numpy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
###Output
_____no_output_____
###Markdown
**Flatten and reshape**There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")
# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")
# and back to 2D
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
###Output
_____no_output_____
###Markdown
You will also see the ```.view()``` method used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()```. The documentation can be found in the appendix. **Squeezing tensors**When processing batches of data, you will quite often be left with singleton dimensions. e.g. [1,10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there...In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.
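As an aside on the two reshaping methods above, a minimal sketch (both give the same shape here):

```python
import torch

z = torch.arange(12)
# .view() returns a view of the same underlying data; it requires a
# compatible (contiguous) memory layout and never copies
print(z.view(3, 4).shape)
# .reshape() behaves like .view() when possible, and copies otherwise
print(z.reshape(3, 4).shape)
```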
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print(f"x[0]: {x[0]}")
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, x[0] gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")
# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")
# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
###Output
_____no_output_____
###Markdown
**Permutation**Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension i.e. [48x64x3]. To get around this we can use ```.permute()```
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
###Output
_____no_output_____
###Markdown
You may also see ```.transpose()``` used. This works in a similar way to permute, but can only swap two dimensions at once; a quick sketch of the difference follows below. **Concatenation** In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor’s axis-0 length (6) is the sum of the two input tensors’ axis-0 lengths (3+3); while the second output tensor’s axis-1 length (8) is the sum of the two input tensors’ axis-1 lengths (4+4).
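A minimal sketch of ```.transpose()``` next to ```.permute()``` (shapes only; the sizes are illustrative):

```python
import torch

x = torch.rand(3, 48, 64)
# transpose swaps exactly two dimensions: [3, 48, 64] -> [64, 48, 3]
print(x.transpose(0, 2).shape)
# permute can reorder all dimensions at once: [3, 48, 64] -> [48, 64, 3]
print(x.permute(1, 2, 0).shape)
```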
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
#concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by colums: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
###Markdown
**Conversion to Other Python Objects**Converting to a NumPy array, or vice versa, is easy. Be careful with the memory semantics: calling ```.numpy()``` on a CPU tensor returns an array that *shares* memory with the tensor, so in-place changes to one are visible in the other, while ```torch.tensor()``` always copies its input data. When converting to a numpy array, the information being tracked by the tensor will be lost, i.e., the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
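A minimal sketch of this sharing behaviour (CPU tensors; the variable names are illustrative):

```python
import torch

t = torch.zeros(3)
a = t.numpy()        # `a` shares memory with the CPU tensor `t`
t += 1               # an in-place update of `t`...
print(a)             # ...is visible in `a`: [1. 1. 1.]
b = torch.tensor(a)  # torch.tensor() copies its input data
t += 1
print(b)             # the copy is unaffected: tensor([1., 1., 1.])
```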
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the item function or Python’s built-in functions.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.3: Manipulating TensorsUsing a combination of the methods discussed above, complete the functions below. **Function A** This function takes in two 2D tensors $A$ and $B$ and returns the column sum of A multiplied by the sum of all the elements of $B$, i.e., a scalar, e.g.: $ A = \begin{bmatrix}1 & 1 \\1 & 1 \end{bmatrix}$ $ B = \begin{bmatrix}1 & 2 & 3\\1 & 2 & 3 \end{bmatrix}$$ Out = \begin{bmatrix} 2 & 2 \\\end{bmatrix} \cdot 12 = \begin{bmatrix}24 & 24\\\end{bmatrix}$**Function B** This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.: $ C = \begin{bmatrix}2 & 3 \\-1 & 10 \end{bmatrix}$ $ Out = \begin{bmatrix}0 & 2 \\1 & 3 \\2 & -1 \\3 & 10\end{bmatrix}$**Hint:** pay close attention to singleton dimensions**Function C**This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $D$-shaped $E$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors, e.g.: $ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$ $ E = \begin{bmatrix}2 & 3 & 0 & 2 \\\end{bmatrix}$ $ Out = \begin{bmatrix}3 & 2 \\-1 & 5 \end{bmatrix}$ $ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$ $ E = \begin{bmatrix}2 & 3 & 0 \\\end{bmatrix}$ $ Out = \begin{bmatrix}1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix}$**Hint:** ```torch.numel()``` is an easy way of finding the number of elements in a tensor
###Code
def functionA(A, B):
"""
This function takes in two 2D tensors A and B and returns the column sum of
A multiplied by the sum of all the elements of B, i.e., a scalar.
Args:
A: torch.Tensor
B: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `A` by the sum of `B`.
"""
################################################
## TODO for students: complete functionA
raise NotImplementedError("Student exercise: complete function A")
################################################
# TODO multiply the column sum of A by the sum of B
output = ...
return output
def functionB(C):
"""
This function takes in a square matrix C and returns a 2D tensor consisting of
a flattened C with the index of each element appended to this tensor in the
row dimension.
Args:
C: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function B")
################################################
# TODO flatten the tensor C
C = ...
# TODO create the idx tensor to be concatenated to C
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(D, E):
"""
This function takes in two 2D tensors D and E . If the dimensions allow it,
this function returns the elementwise sum of D-shaped E, and D;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
D: torch.Tensor
E: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionC
raise NotImplementedError("Student exercise: complete function C")
################################################
# TODO check we can reshape E into the shape of D
if ...:
# TODO reshape E into the shape of D
E = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
D = ...
E = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
## Implement the functions above and then uncomment the following lines to test your code
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
# to_remove solution
def functionA(A, B):
"""
This function takes in two 2D tensors A and B and returns the column sum of
A multiplied by the sum of all the elements of B, i.e., a scalar.
Args:
A: torch.Tensor
B: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `A` by the sum of `B`.
"""
# TODO multiply the column sum of A by the sum of B
output = A.sum(axis=0) * B.sum()
return output
def functionB(C):
"""
This function takes in a square matrix C and returns a 2D tensor consisting of
a flattened C with the index of each element appended to this tensor in the
row dimension.
Args:
C: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO flatten the tensor C
C = C.flatten()
# TODO create the idx tensor to be concatenated to C
idx_tensor = torch.arange(0, len(C))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), C.unsqueeze(1)], axis=1)
return output
def functionC(D, E):
"""
This function takes in two 2D tensors D and E . If the dimensions allow it,
this function returns the elementwise sum of D-shaped E, and D;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
D: torch.Tensor
E: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO check we can reshape E into the shape of D
if torch.numel(D) == torch.numel(E):
# TODO reshape E into the shape of D
E = E.reshape(D.shape)
# TODO sum the two tensors
output = D + E
else:
# TODO flatten both tensors
D = D.reshape(1, -1)
E = E.reshape(1, -1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([D, E], axis=1)
return output
## Implement the functions above and then uncomment the following lines to test your code
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
###Output
_____no_output_____
###Markdown
```
tensor([24, 24])
tensor([[ 0,  2],
        [ 1,  3],
        [ 2, -1],
        [ 3, 10]])
tensor([[ 3,  2],
        [-1,  5]])
tensor([[ 1, -1, -1,  3,  2,  3,  0]])
``` Section 2.4: GPUs
###Code
# @title Video 6: GPU vs CPU
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
When using Colab notebooks, you will not have access to a GPU by default. In order to start using GPUs we need to request one. We can do this by going to the Runtime tab at the top of the page. By following Runtime -> Change runtime type and selecting "GPU" from the Hardware Accelerator dropdown list, we can start playing with sending tensors to GPUs.Once you have done this your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell.(For more information on the GPU usage policy, see the appendix.) **Now we have a GPU** The cell below should return True.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction, and allows us to launch CUDA kernels using pure Python.In short, we get the power of parallelising our tensor computations on GPUs, whilst only writing (relatively) simple Python!Let's make some CUDA tensors!
###Code
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("WARNING: For this notebook to perform best, "
"if possible, in the menu under `Runtime` -> "
"`Change runtime type.` select `GPU` ")
else:
print("GPU is enabled in this notebook.")
return device
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between cpu tensors and cuda tensors**Note that the type of the tensor changed after calling ```.to()```. What happens if we try to perform operations on tensors that live on different devices?
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
# Uncomment the following line and run this cell
# z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the ```.to()``` method as before, or the ```.cpu()``` and ```.cuda()``` methods.Generally in this course all deep learning is done on the GPU and any other computation is done on the CPU, so sometimes we have to pass things back and forth; you'll see us call these methods often:
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)
# moving to cpu
x = x.to("cpu") # alternatively, you can use x = x.cpu()
print(x + y)
# moving to gpu
y = y.to(DEVICE) # alternatively, you can use y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.4: Just how much faster are GPUs?Below is a simple function. Complete the second function, such that it performs the same operations as the first function, but entirely on the GPU.
###Code
dim = 10000
iterations = 1
def simpleFun(dim, device='cpu'):
"""
Args:
dim: integer
device: "cpu" or "cuda" (unused here; the computation stays on the CPU)
Returns:
Nothing.
"""
x = torch.rand(dim, dim)
y = torch.rand_like(x)
z = 2*torch.ones(dim, dim)
x = x * y
x = x @ z
# garbage collection
del x
del y
del z
def simpleFunGPU(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
###############################################
## TODO for students: recreate the above function, but
## ensure all computation happens on the GPU
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
# TODO: create the tensors x, y, z on the GPU
x = ...
y = ...
z = ...
# TODO: perform the same computations as in `simpleFun`
x = ...
x = ...
del x
del y
del z
## TODO: Implement the function above and uncomment the following lines to test your code
# timeFun(simpleFun, dim=dim, iterations=iterations, device=DEVICE)
# timeFun(simpleFunGPU, dim=dim, iterations=iterations, device=DEVICE)
# to_remove solution
def simpleFunGPU(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
x = torch.rand(dim, dim, device=device)  # allocate directly on the device instead of moving from the CPU
y = torch.rand_like(x)  # rand_like inherits x's device
z = 2 * torch.ones(dim, dim, device=device)
x = x * y
x = x @ z
del x
del y
del z
## TODO: Implement the function above and uncomment the following lines to test your code
timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
timeFun(f=simpleFunGPU, dim=dim, iterations=iterations, device=DEVICE)
###Output
_____no_output_____
###Markdown
**Discuss!**Try reducing the dimensions of the tensors and increasing the iterations. You can get to a point where the CPU-only function is faster than the GPU function. Why might this be? Section 2.5: Datasets and Dataloaders
###Code
# @title Video 7: Getting Data
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
###Code
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
###Output
_____no_output_____
###Markdown
**Datasets**The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print(f"Number of samples:{len(cifar10_data)}")
print(f"Class names:{cifar10_data.classes}")
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
# Choose a random sample
random.seed(2021)
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]  # randint's upper bound is inclusive
print('Label:', cifar10_data.classes[label])
print('Image size:', image.shape)
###Output
_____no_output_____
###Markdown
Color images are modeled as 3 dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimension is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W. Coding Exercise 2.5: Display an image from the datasetLet's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - $H \times W \times C$.You need to reorder the dimensions of the tensor using the `permute` method of the tensor. PyTorch `torch.permute(*dims)` rearranges the original tensor according to the desired ordering and returns a tensor with its dimensions permuted. The size of the returned tensor remains the same as that of the original.**Code hint:**

```python
# create a tensor of size 2 x 4
input_var = torch.randn(2, 4)
# print its size and the tensor
print(input_var.size())
print(input_var)
# dimensions permuted
input_var = input_var.permute(1, 0)
# print its size and the permuted tensor
print(input_var.size())
print(input_var)
```
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
# to_remove solution
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1, 2, 0))
#@title Video 8: Train and Test
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
**Training and Test Datasets**When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, today we will not use both datasets separately, but this topic will be addressed in the next days.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
# @title Video 9: Data Augmentation - Transformations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
**Dataloader**Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
*Reproducibility:* DataLoader will reseed workers following the Randomness in multi-process data loading algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:

```python
def seed_worker(worker_id):
    worker_seed = torch.initial_seed() % 2**32
    numpy.random.seed(worker_seed)
    random.seed(worker_seed)

g_seed = torch.Generator()
g_seed.manual_seed(my_seed)

DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    worker_init_fn=seed_worker,
    generator=g_seed
)
```

We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then we can query the next batch using the function `next`. We can now see that we have a 4D tensor. This is because we have 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
###Output
_____no_output_____
###Markdown
**Transformations**Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform. Check out the [pytorch documentation](https://pytorch.org/vision/stable/transforms.html) for details. Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale imagesThe goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility.
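**Hint:** a minimal sketch of combining transforms with `Compose` (the normalization statistics below are illustrative placeholders, not CIFAR10's true values):

```python
from torchvision import transforms

# convert a PIL image to a tensor, then normalize each RGB channel
preprocess = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
])
```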
###Code
def my_data_load():
###############################################
## TODO for students: recreate the above function, but
## ensure all computation happens on the GPU
raise NotImplementedError("Student exercise: fill in the missing code to load the data")
###############################################
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(...,
transform=...)
# Display a random grayscale image
image, label = data[random.randint(0, len(data) - 1)]  # randint's upper bound is inclusive
plt.imshow(image.squeeze(), cmap="gray")
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# my_data_load()
# to_remove solution
def my_data_load():
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(root="data", download=True,
transform=Compose([ToTensor(), Grayscale()]))
# Display a random grayscale image
image, label = data[random.randint(0, len(data) - 1)]  # randint's upper bound is inclusive
plt.imshow(image.squeeze(), cmap="gray")
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
my_data_load()
###Output
_____no_output_____
###Markdown
--- Section 3: Neural NetworksNow it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:- Creating a simple neural network model- Training the network- Visualizing the results of the network- Tweaking the network
###Code
# @title Video 10: CSV Files
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Data LoadingFirst we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
# @title Generate sample data
# @markdown we use the `scikit-learn` module
from sklearn.datasets import make_moons
# Create a dataset of 256 points with a little noise
X, y = make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")
# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Initialize the device variable
DEVICE = set_device()
# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)
# Upload the tensor to the device
X = X.to(DEVICE)
print(f"Size X:{X.shape}")
# Convert the labels to a long integer tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(DEVICE)
print(f"Size y:{y.shape}")
###Output
_____no_output_____
###Markdown
Section 3.2: Create a Simple Neural Network
###Code
# @title Video 11: Generating the Neural Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
For this example we want to have a simple neural network consisting of 3 layers:- 1 input layer of size 2 (our points have 2 coordinates)- 1 hidden layer of size 16 (you can play with different numbers here)- 1 output layer of size 2 (we want to have the scores for the two classes)During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.**Programming the Network**PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:`__init__`In the `__init__` method you need to define the structure of your network. Here you will specify what layers the network will consist of, what activation functions will be used, etc.`forward`All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it.`predict`This is not an obligatory method of a neural network module, but it is a good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score.`train`This is also not an obligatory method, but it is a good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook.> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`.
###Code
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
nn.ReLU(), # Activation function (ReLU) is a non-linearity which is widely used because it reduces computation. The function returns 0 if it receives any
# negative input, but for any positive value x, it returns that value back.
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Choose the most likely label predicted by the network
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
def train(self, X, y):
pass
###Output
_____no_output_____
###Markdown
**Check that your network works**Create an instance of your model and visualize it
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(DEVICE)
# Print the structure of the network
print(model)
###Output
_____no_output_____
###Markdown
Coding Exercise 3.2: Classify some samplesNow let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. The goal here is just to get some experience with the data structures that are passed to the forward and predict methods and their results.
###Code
## Get the samples
# X_samples = ...
# print("Sample input:", X_samples)
## Do a forward pass of the network
# output = ...
# print("Network output:", output)
## Predict the label of each point
# y_predicted = ...
# print("Predicted labels:", y_predicted)
# to_remove solution
## Get the samples
X_samples = X[0:5]
print("Sample input:", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("Network output:", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("Predicted labels:", y_predicted)
###Output
_____no_output_____
###Markdown
```
Sample input: tensor([[ 0.9066,  0.5052],
        [-0.2024,  1.1226],
        [ 1.0685,  0.2809],
        [ 0.6720,  0.5097],
        [ 0.8548,  0.5122]], device='cuda:0')
Network output: tensor([[-0.3032, -0.5563],
        [-0.1419, -0.3195],
        [-0.2879, -0.6030],
        [-0.2665, -0.4831],
        [-0.2973, -0.5369]], device='cuda:0', grad_fn=<AddmmBackward>)
Predicted labels: tensor([0, 0, 0, 0, 0], device='cuda:0')
``` Section 3.3: Train Your Neural Network
###Code
# @title Video 12: Train the Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet - we will cover training in much more detail in the next days. For now, the goal is just to see your network in action!You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we will implement it as a function outside of the class in order to have it in a separate cell.
###Code
# @title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
# We'll use the whole dataset during the training instead of using batches
# in order to keep the code simple for now.
y_logits = model.forward(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(model, X, y, DEVICE)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Create a new network instance a train it
model = NaiveNet().to(DEVICE)
losses = train(model, X, y)
###Output
_____no_output_____
###Markdown
**Plot the loss during training**Plot the loss during the training to see how it reduces and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
# @title Visualize the training process
# @markdown ### Execute this cell!
!pip install imageio --quiet
!pip install pathlib --quiet
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(10):
filename = "frames/{:05d}.png".format(i * 1000)
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
# @title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Exercise 3.3: Tweak your NetworkYou can now play around with the network a little bit to get a feeling of what different parameters are doing. Here are some ideas for what you could try:- Increase or decrease the number of epochs for training- Increase or decrease the size of the hidden layer- Add one additional hidden layerCan you get the network to better fit the data? A sketch of one possible variant follows.
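A sketch of one possible variant with an extra hidden layer (the layer sizes are illustrative; try your own):

```python
import torch
import torch.nn as nn

class TweakedNet(nn.Module):
    def __init__(self):
        super(TweakedNet, self).__init__()
        self.layers = nn.Sequential(
            nn.Linear(2, 32),   # input -> first hidden layer
            nn.ReLU(),
            nn.Linear(32, 32),  # the additional hidden layer
            nn.ReLU(),
            nn.Linear(32, 2),   # hidden -> output scores
        )

    def forward(self, x):
        return self.layers(x)

    def predict(self, x):
        # predict is needed because train() calls plot_decision_boundary()
        return torch.argmax(self.forward(x), 1)

# model = TweakedNet().to(DEVICE)
# losses = train(model, X, y)
```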
###Code
# @title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Exclusive OR (XOR) logical operation gives a true (`1`) output when the number of true inputs is odd. That is, a true output results if one, and only one, of the inputs to the gate is true. If both inputs are false (`0`) or both are true (`1`), a false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false.In case of two inputs ($X$ and $Y$) the following truth table is applied:\begin{array}{ccc}X & Y & \text{XOR} \\\hline0 & 0 & 0 \\0 & 1 & 1 \\1 & 0 & 1 \\1 & 1 & 0 \\\end{array}Here, with `0`, we denote `False`, and with `1` we denote `True` in boolean terms. Interactive Demo 3.3: Solving XORHere we use the famous open-source visualization widget developed by the TensorFlow team, available [here](https://github.com/tensorflow/playground).* Play with the widget and observe that you cannot solve the continuous XOR dataset.* Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly.For the second part, you should set the weights by clicking on the connections and either type the value or use the up and down keys to change it by one increment. You could also do the same for the biases by clicking on the tiny square to each neuron's bottom left.Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is: \begin{equation} y = f(x_1)+f(x_2)-f(x_1+x_2)\end{equation}Try to set the weights and biases to implement this function after you have played enough :)
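As a quick sanity check, here is a minimal sketch verifying the formula on the four sign combinations; since the playground's continuous XOR dataset labels points by the sign of $x_1 x_2$, values of $\pm 1$ stand in for the boolean inputs:

```python
import torch

def f(x):
    # f is ReLU, as in the formula above
    return torch.relu(x)

for x1 in (-1.0, 1.0):
    for x2 in (-1.0, 1.0):
        y = f(torch.tensor(x1)) + f(torch.tensor(x2)) - f(torch.tensor(x1 + x2))
        print(f"x1={x1:+.0f}, x2={x2:+.0f} -> y={y.item():.0f}")
# prints 1 exactly when the signs differ, i.e., XOR
```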
###Code
# @markdown ###Play with the parameters to solve XOR
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor®Dataset=reg-plane&learningRate=0.03®ularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
# @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
###Output
_____no_output_____
###Markdown
--- Section 4: EthicsLet us watch the Coded Bias movie together and discuss it.
###Code
# @title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
--- Bonus
###Code
# @title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 17: It's a wrap!
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 18: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.**Datasets**The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print('Number of samples:', len(cifar10_data))
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
import random
# Predefined label names
cifar10_labels = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
# Choose a random sample
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]
print('Label:', cifar10_labels[label])
print('Image size:', image.shape)
###Output
_____no_output_____
###Markdown
Color images are modeled as 3 dimensional tensors. The first dimension corresponds to the channels of the image (in this case we have RGB images). The second dimension is the height of the image and the third is the width. We can denote this image format as C × H × W.

Exercise 5: Display an image from the dataset

Let's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - H × W × C. You need to reorder the dimensions of the tensor using the `permute` method of the tensor.
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
#to_remove solutions
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1,2,0))
#@title Video 2.6: Train and Test
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="JokSIuPs-ys", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
**Training and Test Datasets**

When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
#@title Video 2.7: Data Augmentation - Transformations
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="sjegA9OBUPw", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
**Dataloader**

Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with batch size 64 and shuffling enabled
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
We can now query the next batch from the data loader and inspect it. We now see that we have a 4D tensor. This is because we have 64 images in the batch, and each image has 3 dimensions: channels, height and width.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
###Output
_____no_output_____
###Markdown
**Transformations**

Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation, etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform.

Exercise 6: Load the CIFAR10 dataset as grayscale images

The goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images.
###Code
from torchvision.transforms import Compose, Grayscale
# TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
# data = datasets.CIFAR10( ...
# TODO After implementing the above code, uncomment the following lines to test your code
# Display a random grayscale image
# image, label = data[random.randint(0, len(data) - 1)]
# plt.imshow(image.squeeze(), cmap="gray")
#to_remove solution
from torchvision.transforms import Compose, Grayscale
# TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(
root="data",
download=True,
transform=Compose([ToTensor(),Grayscale()])
)
# Display a random grayscale image
image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
###Output
_____no_output_____
###Markdown
Section 3: Neural Networks

Now it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:

- Creating a simple neural network model
- Training the network
- Visualizing the results of the network
- Tweaking the network
###Code
#@title Video 3.1: CSV Files
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="JrC_UAJWYKU", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Section 3.1: Data Loading

First we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
#@title Generate sample data
import sklearn.datasets
import pandas as pd
# Create a dataset of 256 points with a little noise
X, y = sklearn.datasets.make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
import pandas as pd
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print("Size X:", X_orig.shape)
print("Size y:", y_orig.shape)
# Visualize the dataset
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**

Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Convert the 2D points to a float tensor
X = torch.from_numpy(X_orig).type(torch.FloatTensor)
# Upload the tensor to the device
X = X.to(device)
print("Size X:", X.shape)
# Convert the labels to a long integer tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(device)
print("Size y:", y.shape)
#@title Video 3.2: Generating the Neural Network
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="PwSzRohUvck", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Section 3.2: Create a Simple Neural Network

For this example we want to have a simple neural network consisting of 3 layers:

- 1 input layer of size 2 (our points have 2 coordinates)
- 1 hidden layer of size 16 (you can play with different numbers here)
- 1 output layer of size 2 (we want to have the scores for the two classes)

**Programming the Network**

PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:

`__init__`: In the `__init__` method you need to define the structure of your network. Here you will specify which layers the network will consist of, which activation functions will be used, etc.

`forward`: All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it.

`predict`: This is not an obligatory method of a neural network module, but it is a good practice if you want to interpret the result of the network as a probability distribution.

`train`: This is also not an obligatory method, but it is a good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook.

> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`.
###Code
import torch.nn.functional as F
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
nn.ReLU(), # Activation function (ReLU)
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Convert the output of the network to a probability distribution
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
def train(self, X, y):
pass
###Output
_____no_output_____
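Note that `predict` above returns the index of the class with the highest score (the most likely class), not a probability distribution. If you do want probabilities, a minimal sketch (an optional variant, not part of the tutorial's model) would apply a softmax to the raw outputs first:

```python
# A sketch (optional variant, not part of the tutorial's model): turn the raw
# network outputs (logits) into a probability distribution with softmax.
import torch.nn.functional as F

def predict_proba(model, x):
  logits = model(x)                # raw, unnormalized scores
  return F.softmax(logits, dim=1)  # rows now sum to 1
```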
###Markdown
**Check that your network works**

Create an instance of your model and visualize it.
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(device)
# Print the structure of the network
print(model)
###Output
_____no_output_____
###Markdown
Exercise 7: Classify some samples

Now let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet.
###Code
#X_samples = ...
#print("Sample input:", X_samples)
# Do a forward pass of the network
#output = ...
#print("Network output:", output)
# Predict the label of each point
# y_predicted = ...
# print("Predicted labels:", y_predicted)
#to_remove solutions
X_samples = X[0:5]
print("Sample input:", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("Network output:", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("Predicted labels:", y_predicted)
###Output
_____no_output_____
###Markdown
Section 3.3: Train Your Neural Network
###Code
#@title Video 3.3: Train the Network
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="4MIqnE4XPaA", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Now it is time to train your network on your dataset. We will not go into the details of the training process for now; this will be covered in the coming days. The goal for now is to see your network in action.
###Code
#@title Helper function to plot the decision boundary
from pathlib import Path
def plot_decision_boundary(model, X, y):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function
def train(self, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(self.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
y_logits = self(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(self, X, y)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Replace the train function in the NaiveNet class
NaiveNet.train = train
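# Note: this assignment replaces the placeholder train() defined in the class
# and shadows the train() method inherited from nn.Module (which toggles
# training/evaluation mode); that is fine for this toy example, but worth
# being aware of.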
# Create a new network instance and train it
model = NaiveNet().to(device)
losses = model.train(X, y)
###Output
_____no_output_____
###Markdown
**Plot the loss during training**

Plot the loss during training to see how it decreases and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
#@title Visualize the training process
import imageio
images = []
for i in range(0, 15000, 1000):
  filename = "frames/{:05d}.png".format(i)
  images.append(imageio.imread(filename))
imageio.mimsave('frames/movie.gif', images)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
from IPython import display
from pathlib import Path
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display.Image(data=f.read(), format='png')
#@title Video 3.4: Play with it
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="_GGkapdOdSY", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Exercise 8: Tweak your Network

You can now play around with the network a little bit to get a feeling for what the different parameters are doing. Here are some ideas for what you could try:

- Increase or decrease the number of epochs for training
- Increase or decrease the size of the hidden layer
- Add one additional hidden layer

Can you get the network to better fit the data? A sketch of one possible variant follows below.
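For example, a minimal sketch (one possible variant, not the official solution) of a network with a wider hidden layer and one additional hidden layer could look like this:

```python
# A sketch of one possible tweak (assumed sizes, not the official solution):
# a wider hidden layer plus one additional hidden layer.
from torch import nn

class TweakedNet(nn.Module):
  def __init__(self):
    super(TweakedNet, self).__init__()
    self.layers = nn.Sequential(
        nn.Linear(2, 32),   # wider hidden layer
        nn.ReLU(),
        nn.Linear(32, 32),  # one additional hidden layer
        nn.ReLU(),
        nn.Linear(32, 2),
    )

  def forward(self, x):
    return self.layers(x)
```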
###Code
#@title Video 3.5: XOR Widget
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="cnu7pyRx_u0", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Exercise 9: Solving XOR

Here we use an open-source and famous visualization widget developed by the TensorFlow team, available [here](https://github.com/tensorflow/playground).

* Play with the widget and observe that you cannot solve the continuous XOR dataset.
* Now add one hidden layer with three units, play with the widget, and set the weights by hand to solve this dataset perfectly.

For the second part, you should set the weights by clicking on the connections and either type the value or use the up and down keys to change it by one increment. You can do the same for the biases by clicking on the tiny square to each neuron's bottom left.

Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is:

$$y = f(x_1)+f(x_2)-f(x_1+x_2)$$

Try to set the weights and biases to implement this function after you have played enough :)
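Before (or after) trying it in the widget, here is a quick numeric check (a sketch; it uses the ±1 sign encoding of the playground's continuous XOR data) that this construction outputs a positive value exactly when the signs of $x_1$ and $x_2$ differ:

```python
# A sketch verifying y = f(x1) + f(x2) - f(x1 + x2) with f = ReLU on the four
# sign combinations: y is 1 when the signs differ and 0 when they agree.
import torch
import torch.nn.functional as F

for x1 in (-1.0, 1.0):
  for x2 in (-1.0, 1.0):
    y = (F.relu(torch.tensor(x1)) + F.relu(torch.tensor(x2))
         - F.relu(torch.tensor(x1 + x2)))
    print(f"x1={x1:+.0f}, x2={x2:+.0f} -> y={y.item():.0f}")
```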
###Code
# @title Interactive Demo
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor&regDataset=reg-plane&learningRate=0.03&regularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
#@markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'No' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
#@title Video 4: Ethics
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Kt6JLi3rUFU", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
ETHICS: Let us watch the Coded Bias movie together and discuss.

Bonus
###Code
#@title Video 5: Be a group
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Sfp6--d_H1A", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
#@title Video 6: It's a wrap!
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="JwTn7ej2dq8", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
###Output
_____no_output_____
###Markdown
Tutorial 1: PyTorch

**Week 1, Day 1: Basics and PyTorch**

**By Neuromatch Academy**

__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording

__Content reviewers:__ Deepak Raya, Siwei Bai, Kelson Shilling-Scrivo

__Content editors:__ Anoop Kulkarni, Spiros Chavlis

__Production editors:__ Arush Tagade, Spiros Chavlis

**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**

---

Tutorial Objectives

We have a few specific objectives for this tutorial:

* Learn about PyTorch and tensors
* Tensor manipulations
* Data loading
* GPUs and CUDA tensors
* Train NaiveNet
* Get to know your pod
* Start thinking about the course as a whole
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
---

Setup

Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy), set global or environment variables, and load in helper functions for things like plotting. In some tutorials, you will notice that we install some dependencies even if they are preinstalled on Google Colab or Kaggle. This happens because we have added automation to our repository through [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions).

Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.

If you start building your own projects on this code base we highly recommend looking at them in more detail.
###Code
# @title Install dependencies
!pip install pandas --quiet
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
# @title Figure Settings
import ipywidgets as widgets
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# @title Helper Functions
atform = AirtableForm('appn7VdPRseSoMXEG','W1D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/97e94a29-0b3a-4e16-9a8d-f6838a5bd83d')
def checkExercise1(A, B, C, D):
"""
Helper function for checking exercise.
Args:
A: torch.Tensor
B: torch.Tensor
C: torch.Tensor
D: torch.Tensor
Returns:
Nothing.
"""
errors = []
# TODO better errors and error handling
if not torch.equal(A.to(int),torch.ones(20, 21).to(int)):
errors.append(f"Got: {A} \n Expected: {torch.ones(20, 21)} (shape: {torch.ones(20, 21).shape})")
if not np.array_equal( B.numpy(),np.vander([1, 2, 3], 4)):
errors.append("B is not a tensor containing the elements of Z ")
if C.shape != (20, 21):
errors.append("C is not the correct shape ")
if not torch.equal(D, torch.arange(4, 41, step=2)):
errors.append("D does not contain the correct elements")
if errors == []:
print("All correct!")
else:
[print(e) for e in errors]
def timeFun(f, dim, iterations, device='cpu'):
  t_total = 0
  for _ in range(iterations):
    start = time.time()
    f(dim, device)
    end = time.time()
    t_total += end - start
  print(f"time taken for {iterations} iterations of {f.__name__}({dim}, {device}): {t_total:.5f}")
###Output
_____no_output_____
###Markdown
**Important note: Google Colab users**

*Scratch Code Cells*

If you want to quickly try out something or take a look at the data you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook. To open a new scratch cell go to *Insert* → *Scratch code cell*.

Section 1: Welcome to Neuromatch Deep learning course

*Time estimate: ~25mins*
###Code
# @title Video 1: Welcome and History
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing
atform.add_event('Video 1: Welcome and History')
display(out)
###Output
_____no_output_____
###Markdown
This will be an intensive 3-week adventure. We will all learn Deep Learning. In a group. Groups need standards. Read our [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing).
###Code
# @title Video 2: Why DL is cool
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 2: Why DL is cool')
display(out)
###Output
_____no_output_____
###Markdown
**Describe what you hope to get out of this course in about 100 words.**

---

Section 2: The Basics of PyTorch

*Time estimate: ~2 hours 05 mins*

PyTorch is a Python-based scientific computing package targeted at two sets of audiences:

- A replacement for NumPy to use the power of GPUs
- A deep learning platform that provides significant flexibility and speed

At its core, PyTorch provides a few key features:

- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.
- An optimized **autograd** engine for automatically computing derivatives.
- A clean, modular API for building and deploying **deep learning models**.

You can find more information about PyTorch in the appendix.

Section 2.1: Creating Tensors
###Code
# @title Video 3: Making Tensors
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 3: Making Tensors')
display(out)
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so.

**Construct tensors directly:**

---
###Code
# we can construct a tensor directly from some common python iterables,
# such as list and tuple nested iterables can also be handled as long as the
# dimensions make sense
# tensor from a list
a = torch.tensor([0, 1, 2])
#tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
###Output
_____no_output_____
###Markdown
**Some common tensor constructors:**

---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1, 5)
print(f"Tensor x: {x}")
print(f"Tensor y: {y}")
print(f"Tensor z: {z}")
###Output
_____no_output_____
###Markdown
Notice that ```.empty()``` does not return zeros, but seemingly random small numbers. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence a bit faster if you are looking to just create a tensor.

**Creating random tensors and tensors like other tensors:**

---
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
print(f"Tensor d: {d}")
###Output
_____no_output_____
###Markdown
*Reproducibility*:

- PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA):

```python
import torch
torch.manual_seed(0)
```

- For custom operators, you might need to set the Python seed as well:

```python
import random
random.seed(0)
```

- Random number generators in other libraries:

```python
import numpy as np
np.random.seed(0)
```

Here, we define for you a function called `set_seed` that does the job for you!
###Code
def set_seed(seed=None, seed_torch=True):
"""
Function that controls randomness. NumPy and random modules must be imported.
Args:
seed : Integer
A non-negative integer that defines the random state. Default is `None`.
seed_torch : Boolean
If `True` sets the random seed for pytorch tensors, so pytorch module
must be imported. Default is `True`.
Returns:
Nothing.
"""
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
###Output
_____no_output_____
###Markdown
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
###Code
def simplefun(seed=True, my_seed=None):
if seed:
set_seed(seed=my_seed)
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
print("Tensor a: ", a)
print("Tensor b: ", b)
simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed`
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**

The ```.arange()``` and ```.linspace()``` constructors behave how you would expect them to if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Creating Tensors

Below you will find some incomplete code. Fill in the missing code to construct the specified tensors.

We want the tensors:

$A:$ 20 by 21 tensor consisting of ones

$B:$ a tensor with elements equal to the elements of numpy array $Z$

$C:$ a tensor with the same number of elements as $A$ but with values $\sim U(0,1)$

$D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive.
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## from the first expression
raise NotImplementedError("Student exercise: say what they should have done")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
```All correct!```

Section 2.2: Operations in PyTorch
###Code
# @title Video 4: Tensor Operators
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 4: Tensor Operators')
display(out)
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
# this only works if c and d already exist
torch.add(a, b, out=c)
#Pointwise Multiplication of a and b
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overloaded. The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations.
###Code
x = torch.tensor([1, 2, 4, 8])
y = torch.tensor([1, 2, 3, 4])
x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation
###Output
_____no_output_____
###Markdown
**Tensor Methods**

Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!). All of these operations should have similar syntax to their numpy equivalents. (Feel free to skip if you already know this!)
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
###Output
_____no_output_____
###Markdown
**Matrix Operations**

The ```@``` symbol is overloaded to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot products, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.T```. Note the lack of brackets for ```Tensor.T``` - it is an attribute, not a method; a short illustration follows below.

Coding Exercise 2.2: Simple tensor operations

Below are two expressions involving operations on matrices.

$$ \textbf{A} = \begin{bmatrix}2 &4 \\5 & 7 \end{bmatrix} \begin{bmatrix} 1 &1 \\2 & 3\end{bmatrix} + \begin{bmatrix}10 & 10 \\ 12 & 1 \end{bmatrix} $$

and

$$ b = \begin{bmatrix} 3 \\ 5 \\ 7\end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8\end{bmatrix}$$

The code block below that computes these expressions using PyTorch is incomplete - fill in the missing lines.
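Before filling in the exercise, here is a quick illustration (assumed example values, not part of the exercise) of the operators just described:

```python
# A minimal sketch of the matrix operations described above.
import torch

M = torch.tensor([[1., 2.], [3., 4.]])
v = torch.tensor([1., 2.])

print(M @ M)              # matrix multiplication, same as torch.matmul(M, M)
print(torch.matmul(M, M))
print(torch.dot(v, v))    # dot product of two 1D tensors -> tensor(5.)
print(M.T)                # transpose as an attribute (no brackets)
print(torch.t(M))         # transpose as a function
```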
###Code
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
################################################
## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
################################################
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = ...
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# A = simple_operations(a1, a2, a3)
# print(A)
# to_remove solution
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = a1 @ a2 + a3
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
A = simple_operations(a1, a2, a3)
print(A)
###Output
_____no_output_____
###Markdown
```
tensor([[20, 24],
        [31, 27]])
```
###Code
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
###############################################
## TODO for students: complete the dot product computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
# Use torch.dot() to compute the dot product of two tensors
product = ...
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# b = dot_product(b1, b2)
# print(b)
# to_remove solution
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
# Use torch.dot() to compute the dot product of two tensors
product = torch.dot(b1, b2)
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
b = dot_product(b1, b2)
print(b)
###Output
_____no_output_____
###Markdown
```tensor(82)```

Section 2.3: Manipulating Tensors in PyTorch
###Code
# @title Video 5: Tensor Indexing
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 5: Tensor Indexing')
display(out)
###Output
_____no_output_____
###Markdown
**Indexing**

Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0, and ranges include the first element but exclude the last. We can access elements according to their relative position to the end of the list by using negative indices. Indexing is also referred to as slicing.

For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as numpy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
###Output
_____no_output_____
###Markdown
**Flatten and reshape**

There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")
# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")
# and back to 2D
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
###Output
_____no_output_____
###Markdown
You will also see the ```.view()``` method used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()``` (see the short sketch below), though for now we will just use ```.reshape()```. The documentation can be found in the appendix.

**Squeezing tensors**

When processing batches of data, you will quite often be left with singleton dimensions, e.g., [1, 10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there...

In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.
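As promised, a minimal sketch of the ```.view()``` vs ```.reshape()``` difference (the squeezing demo continues in the next cell): ```.view()``` requires the tensor's memory to be contiguous and never copies, while ```.reshape()``` falls back to copying when it has to.

```python
# A sketch of the subtle difference: .view() fails on non-contiguous memory,
# while .reshape() copies when needed.
import torch

x = torch.arange(6).reshape(2, 3)
t = x.T                   # transposing makes the memory layout non-contiguous
print(t.is_contiguous())  # False
print(t.reshape(6))       # works: reshape copies if it has to
# t.view(6)               # uncommenting this line raises a RuntimeError
```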
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print(f"x[0]: {x[0]}")
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, x[0] gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")
# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")
# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
###Output
_____no_output_____
###Markdown
**Permutation**

Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension, i.e., [48x64x3]. To get around this we can use ```.permute()```.
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
###Output
_____no_output_____
###Markdown
You may also see ```.transpose()``` used. This works in a similar way to permute, but can only swap two dimensions at once.

**Concatenation**

In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor's axis-0 length (6) is the sum of the two input tensors' axis-0 lengths (3 + 3), while the second output tensor's axis-1 length (8) is the sum of the two input tensors' axis-1 lengths (4 + 4).
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
#concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by colums: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
###Markdown
**Conversion to Other Python Objects**

Converting to a NumPy array, or vice versa, is easy. Note that, on the CPU, `Tensor.numpy()` returns an array that shares its underlying memory with the tensor, while constructing a tensor with `torch.tensor()` copies the data. Keeping track of which objects share memory is important: when you perform operations on the CPU or on GPUs, you do not want to halt computation, waiting to see whether the NumPy package of Python might want to be doing something else with the same chunk of memory.

When converting to a numpy array, the information being tracked by the tensor will be lost, i.e., the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
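A minimal sketch (an aside, not part of the original cell) of the memory behaviour described above:

```python
# Tensor.numpy() shares memory with the CPU tensor; torch.tensor(array) copies.
import torch

x = torch.zeros(3)
y = x.numpy()        # shares memory with x
z = torch.tensor(y)  # copies the data

x[0] = 42.0
print(y)  # [42.  0.  0.] - y saw the change
print(z)  # tensor([0., 0., 0.]) - z did not
```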
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the `item` method or Python's built-in functions.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.3: Manipulating Tensors

Using a combination of the methods discussed above, complete the functions below.

**Function A**

This function takes in two 2D tensors $A$ and $B$ and returns the column sum of $A$ multiplied by the sum of all the elements of $B$ (a scalar), e.g.:

$ A = \begin{bmatrix}1 & 1 \\1 & 1 \end{bmatrix} \,$ and $ B = \begin{bmatrix}1 & 2 & 3\\1 & 2 & 3 \end{bmatrix} \,$ so $ \, Out = \begin{bmatrix} 2 & 2 \\\end{bmatrix} \cdot 12 = \begin{bmatrix}24 & 24\\\end{bmatrix}$

**Function B**

This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.:

$ C = \begin{bmatrix}2 & 3 \\-1 & 10 \end{bmatrix} \,$ so $ \, Out = \begin{bmatrix}0 & 2 \\1 & 3 \\2 & -1 \\3 & 10\end{bmatrix}$

**Hint:** pay close attention to singleton dimensions.

**Function C**

This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $D$-shaped $E$ and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors, e.g.:

$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix} \,$ and $ E = \begin{bmatrix}2 & 3 & 0 & 2 \\\end{bmatrix} \, $ so $ \, Out = \begin{bmatrix}3 & 2 \\-1 & 5 \end{bmatrix}$

$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$ and $ \, E = \begin{bmatrix}2 & 3 & 0 \\\end{bmatrix} \,$ so $ \, Out = \begin{bmatrix}1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix}$

**Hint:** `torch.numel()` is an easy way of finding the number of elements in a tensor.
###Code
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
################################################
## TODO for students: complete functionA
raise NotImplementedError("Student exercise: complete function A")
################################################
# TODO multiplication the sum of the tensors
output = ...
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function B")
################################################
# TODO flatten the tensor `my_tensor`
my_tensor = ...
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function C")
################################################
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if ...:
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
my_tensor1 = ...
my_tensor2 = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
# to_remove solution
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
# TODO multiplication the sum of the tensors
output = my_tensor1.sum(axis=0) * my_tensor2.sum()
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO flatten the tensor `my_tensor`
my_tensor = my_tensor.flatten()
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = torch.arange(0, len(my_tensor))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), my_tensor.unsqueeze(1)], axis=1)
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if torch.numel(my_tensor1) == torch.numel(my_tensor2):
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = my_tensor2.reshape(my_tensor1.shape)
# TODO sum the two tensors
output = my_tensor1 + my_tensor2
else:
# TODO flatten both tensors
my_tensor1 = my_tensor1.reshape(1, -1)
my_tensor2 = my_tensor2.reshape(1, -1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([my_tensor1, my_tensor2], axis=1).squeeze()
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
###Output
_____no_output_____
###Markdown
```
tensor([24, 24])
tensor([[ 0,  2],
        [ 1,  3],
        [ 2, -1],
        [ 3, 10]])
tensor([[ 3,  2],
        [-1,  5]])
tensor([ 1, -1, -1,  3,  2,  3,  0])
```

Section 2.4: GPUs
###Code
# @title Video 6: GPU vs CPU
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 6: GPU vs CPU')
display(out)
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
When using Colab notebooks, by default you will not have access to a GPU. In order to start using GPUs we need to request one. We can do this by going to the Runtime tab at the top of the page. By following Runtime -> Change runtime type and selecting "GPU" from the Hardware Accelerator dropdown list, we can start playing with sending tensors to GPUs.

Once you have done this your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell. (For more information on the GPU usage policy, see the appendix.)

**Now we have a GPU**

The cell below should return True.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction, and allows us to launch CUDA kernels using pure Python. In short, we get the power of parallelising our tensor computations on GPUs, whilst only writing (relatively) simple Python!

Here, we define the function `set_device`, which returns the device used in the notebook, i.e., `cpu` or `cuda`. Unless otherwise specified, we use this function on top of every tutorial, and we store the device variable like this:

```python
DEVICE = set_device()
```

Let's define the function using the PyTorch package `torch.cuda`, which is lazily initialized, so we can always import it, and use `is_available()` to determine if our system supports CUDA.
###Code
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
###Output
_____no_output_____
###Markdown
Let's make some CUDA tensors!
###Code
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between cpu tensors and cuda tensors**

Note that the type of the tensor changed after calling ```.to()```. What happens if we try to perform operations on tensors that live on different devices?
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
# Uncomment the following line and run this cell
# z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the `.to()` method as before, or the `.cpu()` and `.cuda()` methods. Note that using `.cuda()` will throw an error if CUDA is not enabled in your machine.

Generally in this course all deep learning is done on the GPU and other computation is done on the CPU, so sometimes we have to pass things back and forth; you'll see us call these methods often.
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)
# moving to cpu
x = x.to("cpu") # alternatively, you can use x = x.cpu()
print(x + y)
# moving to gpu
y = y.to(DEVICE) # alternatively, you can use y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.4: Just how much faster are GPUs?

Below is a simple function `simpleFun`. Complete this function, such that it performs the following operations:
- elementwise multiplication
- matrix multiplication

The operations should be performed on either the CPU or the GPU, as specified by the parameter `device`. We will use the helper function `timeFun(f, dim, iterations, device)`.
###Code
dim = 10000
iterations = 1
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
###############################################
## TODO for students: recreate the function, but
## ensure all computations happens on the `device`
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
###############################################
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
x = ...
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
y = ...
# 2D tensor filled with the scalar value 2, dim x dim
z = ...
# elementwise multiplication of x and y
a = ...
  # matrix multiplication of x and z
b = ...
del x
del y
del z
del a
del b
## TODO: Implement the function above and uncomment the following lines to test your code
# timeFun(f=simpleFun, dim=dim, iterations=iterations)
# timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
# to_remove solution
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
x = torch.rand(dim, dim).to(device)
# 2D tensor filled with uniform random numbers in [0,1), dim x dim
y = torch.rand_like(x).to(device)
# 2D tensor filled with the scalar value 2, dim x dim
z = 2*torch.ones(dim, dim).to(device)
# elementwise multiplication of x and y
a = x * y
  # matrix multiplication of x and z
b = x @ z
del x
del y
del z
del a
del b
## TODO: Implement the function above and uncomment the following lines to test your code
timeFun(f=simpleFun, dim=dim, iterations=iterations)
timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
###Output
_____no_output_____
###Markdown
Sample output (depends on your hardware)

```
time taken for 1 iterations of simpleFun(10000, cpu): 23.74070
time taken for 1 iterations of simpleFun(10000, cuda): 0.87535
```

**Discuss!**

Try and reduce the dimensions of the tensors and increase the iterations. You can get to a point where the CPU-only function is faster than the GPU function. Why might this be?

Section 2.5: Datasets and Dataloaders
###Code
# @title Video 7: Getting Data
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 7: Getting Data')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
###Code
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
###Output
_____no_output_____
###Markdown
**Datasets**The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print(f"Number of samples: {len(cifar10_data)}")
print(f"Class names: {cifar10_data.classes}")
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
# Choose a random sample
random.seed(2021)
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]
print(f"Label: {cifar10_data.classes[label]}")
print(f"Image size: {image.shape}")
###Output
_____no_output_____
###Markdown
Color images are modeled as 3-dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimension is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W.

Coding Exercise 2.5: Display an image from the dataset

Let's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - $H \times W \times C$. You need to reorder the dimensions of the tensor using the `permute` method of the tensor. PyTorch's `Tensor.permute(*dims)` rearranges the original tensor according to the desired ordering and returns a new multidimensional permuted tensor. The size of the returned tensor remains the same as that of the original.

**Code hint:**
```python
# create a tensor of size 2 x 4
input_var = torch.randn(2, 4)
# print its size and the tensor
print(input_var.size())
print(input_var)
# dimensions permuted
input_var = input_var.permute(1, 0)
# print its size and the permuted tensor
print(input_var.size())
print(input_var)
```
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
# plt.show()
# to_remove solution
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1, 2, 0))
plt.show()
#@title Video 8: Train and Test
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 8: Train and Test')
display(out)
###Output
_____no_output_____
###Markdown
**Training and Test Datasets**

When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, today we will not use both datasets separately, but this topic will be addressed in the next days.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
# @title Video 9: Data Augmentation - Transformations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 9: Data Augmentation - Transformations')
display(out)
###Output
_____no_output_____
###Markdown
**Dataloader**Another important concept is the `Dataloader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
*Reproducibility:* The `DataLoader` will reseed workers following the "Randomness in multi-process data loading" algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:

```python
def seed_worker(worker_id):
    worker_seed = torch.initial_seed() % 2**32
    numpy.random.seed(worker_seed)
    random.seed(worker_seed)

g_seed = torch.Generator()
g_seed.manual_seed(my_seed)

DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    worker_init_fn=seed_worker,
    generator=g_seed
)
```

**Note:** For the `seed_worker` to have an effect, `num_workers` should be 2 or more.

We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then we can query the next batch using the function `next`.

We can now see that we have a 4D tensor. This is because we have 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
plt.show()
###Output
_____no_output_____
###Markdown
**Transformations**

Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation, etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform (a small extra sketch of composing several transforms appears after the solution below). Check out the [pytorch documentation](https://pytorch.org/vision/stable/transforms.html) for details.

Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale images

The goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility.
###Code
def my_data_load():
###############################################
## TODO for students: load the CIFAR10 data,
## but as grayscale images and not as RGB colored.
raise NotImplementedError("Student exercise: fill in the missing code to load the data")
###############################################
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(...,
transform=...)
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# data = my_data_load()
# to_remove solution
def my_data_load():
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(root="data", download=True,
transform=Compose([ToTensor(), Grayscale()]))
# Display a random grayscale image
  image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
data = my_data_load()
###Output
_____no_output_____
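As an aside, here is a hedged sketch of chaining a few more transforms with `Compose`. `Normalize` and `RandomHorizontalFlip` are standard `torchvision` transforms; the mean/std values below are illustrative, not tuned for CIFAR10.

```python
# A sketch (not part of the exercise): composing several torchvision transforms
from torchvision.transforms import Compose, ToTensor, Normalize, RandomHorizontalFlip

augmented_data = datasets.CIFAR10(
    root="data", download=True,
    transform=Compose([
        RandomHorizontalFlip(),     # random augmentation, applied per sample
        ToTensor(),                 # convert the PIL image to a C x H x W tensor
        Normalize((0.5, 0.5, 0.5),  # shift/scale each channel to roughly [-1, 1]
                  (0.5, 0.5, 0.5)),
    ])
)
```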
###Markdown
--- Section 3: Neural Networks

*Time estimate: ~1 hour 30 mins (excluding movie)*

Now it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:
- Creating a simple neural network model
- Training the network
- Visualizing the results of the network
- Tweaking the network
###Code
# @title Video 10: CSV Files
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 10: CSV Files')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Data LoadingFirst we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
# @title Generate sample data
# @markdown we used `scikit-learn` module
from sklearn.datasets import make_moons
# Create a dataset of 256 points with a little noise
X, y = make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")
# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
plt.show()
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Initialize the device variable
DEVICE = set_device()
# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)
# Upload the tensor to the device
X = X.to(DEVICE)
print(f"Size X:{X.shape}")
# Convert the labels to a long interger tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(DEVICE)
print(f"Size y:{y.shape}")
###Output
_____no_output_____
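A small aside on the two conversion functions used above: `torch.from_numpy` shares memory with the source array, while `torch.tensor` makes a copy. A minimal sketch:

```python
# torch.from_numpy shares memory with the numpy array; torch.tensor copies it
arr = np.zeros(3)
shared = torch.from_numpy(arr)
copied = torch.tensor(arr)
arr[0] = 42.0
print(shared)  # tensor([42., 0., 0.], dtype=torch.float64), reflects the change
print(copied)  # tensor([0., 0., 0.], dtype=torch.float64), unaffected
```

(In the cell above, the subsequent `.type(torch.LongTensor)` call returns a new tensor, so the shared-memory caveat does not apply there.)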
###Markdown
Section 3.2: Create a Simple Neural Network
###Code
# @title Video 11: Generating the Neural Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 11: Generating the Neural Network')
display(out)
###Output
_____no_output_____
###Markdown
For this example we want to have a simple neural network consisting of 3 layers:
- 1 input layer of size 2 (our points have 2 coordinates)
- 1 hidden layer of size 16 (you can play with different numbers here)
- 1 output layer of size 2 (we want to have the scores for the two classes)

During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.

**Programming the Network**

PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:

`__init__`: In the `__init__` method you need to define the structure of your network. Here you will specify which layers the network will consist of, which activation functions will be used, etc.

`forward`: All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it.

`predict`: This is not an obligatory method of a neural network module, but it is good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score.

`train`: This is also not an obligatory method, but it is good practice to have one. The method will be used to train the network parameters and will be implemented later in the notebook.

> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`. (A tiny check of this equivalence follows after the network definition below.)
###Code
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
      nn.ReLU(),        # Activation function (ReLU): a widely used non-linearity that is
                        # cheap to compute; it returns 0 for any negative input and
                        # returns any positive value x unchanged
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Choose the most likely label predicted by the network
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
def train(self, X, y):
pass
###Output
_____no_output_____
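To see the `__call__` equivalence mentioned above in action, here is a minimal sketch (an illustration, using a fresh untrained `NaiveNet` on random inputs):

```python
# calling the module directly invokes forward() (plus any registered hooks)
net = NaiveNet()
x = torch.randn(4, 2)  # a batch of 4 two-dimensional points
print(torch.equal(net(x), net.forward(x)))  # True
```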
###Markdown
**Check that your network works**Create an instance of your model and visualize it
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(DEVICE)
# Print the structure of the network
print(model)
###Output
_____no_output_____
###Markdown
Coding Exercise 3.2: Classify some samplesNow let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. The goal here is just to get some experience with the data structures that are passed to the forward and predict methods and their results.
###Code
## Get the samples
# X_samples = ...
# print("Sample input:\n", X_samples)
## Do a forward pass of the network
# output = ...
# print("\nNetwork output:\n", output)
## Predict the label of each point
# y_predicted = ...
# print("\nPredicted labels:\n", y_predicted)
# to_remove solution
## Get the samples
X_samples = X[0:5]
print("Sample input:\n", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("\nNetwork output:\n", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("\nPredicted labels:\n", y_predicted)
###Output
_____no_output_____
###Markdown
```
Sample input:
 tensor([[ 0.9066,  0.5052],
        [-0.2024,  1.1226],
        [ 1.0685,  0.2809],
        [ 0.6720,  0.5097],
        [ 0.8548,  0.5122]], device='cuda:0')

Network output:
 tensor([[ 0.1543, -0.8018],
        [ 2.2077, -2.9859],
        [-0.5745, -0.0195],
        [ 0.1924, -0.8367],
        [ 0.1818, -0.8301]], device='cuda:0', grad_fn=<AddmmBackward>)

Predicted labels:
 tensor([0, 0, 1, 0, 0], device='cuda:0')
```

Section 3.3: Train Your Neural Network
###Code
# @title Video 12: Train the Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 12: Train the Network')
display(out)
###Output
_____no_output_____
###Markdown
Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet - we will cover training in much more detail in the next days. For now, the goal is just to see your network in action!

You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we will implement it as a function outside of the class in order to have it in a separate cell.
###Code
# @title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
  # Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
    # We'll use the whole dataset during the training instead of using batches
    # in order to keep the code simple for now.
y_logits = model.forward(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(model, X, y, DEVICE)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Create a new network instance a train it
model = NaiveNet().to(DEVICE)
losses = train(model, X, y)
###Output
_____no_output_____
###Markdown
**Plot the loss during training**Plot the loss during the training to see how it reduces and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
# @title Visualize the training process
# @markdown ### Execute this cell!
!pip install imageio --quiet
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(10):
filename = "frames/0"+str(i)+"000.png"
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
# @title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 13: Play with it')
display(out)
###Output
_____no_output_____
###Markdown
Exercise 3.3: Tweak your Network

You can now play around with the network a little bit to get a feeling for what the different parameters are doing. Here are some ideas for what you could try:
- Increase or decrease the number of epochs for training
- Increase or decrease the size of the hidden layer
- Add one additional hidden layer

Can you get the network to better fit the data? A sketch of a network with one additional hidden layer follows below as a starting point.
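Below is one possible variant to start from, a sketch rather than the canonical solution, which adds a second hidden layer and makes the hidden size a parameter. `TweakedNet` is a hypothetical name introduced here for illustration.

```python
# A sketch of a tweaked network: one extra hidden layer, configurable width
class TweakedNet(nn.Module):
  def __init__(self, hidden_size=32):
    super(TweakedNet, self).__init__()
    self.layers = nn.Sequential(
        nn.Linear(2, hidden_size),
        nn.ReLU(),
        nn.Linear(hidden_size, hidden_size),  # the additional hidden layer
        nn.ReLU(),
        nn.Linear(hidden_size, 2),
    )

  def forward(self, x):
    return self.layers(x)

  def predict(self, x):
    return torch.argmax(self.forward(x), 1)

# Try it with the same training loop as before:
# model = TweakedNet().to(DEVICE)
# losses = train(model, X, y)
```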
###Code
# @title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 14: XOR Widget')
display(out)
###Output
_____no_output_____
###Markdown
Exclusive OR (XOR) logical operation gives a true (`1`) output when the number of true inputs is odd. That is, a true output results if one, and only one, of the inputs to the gate is true. If both inputs are false (`0`) or both are true, a false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false.

In case of two inputs ($X$ and $Y$) the following truth table applies:

\begin{array}{ccc}X & Y & \text{XOR} \\\hline0 & 0 & 0 \\0 & 1 & 1 \\1 & 0 & 1 \\1 & 1 & 0 \\\end{array}

Here, with `0`, we denote `False`, and with `1` we denote `True` in boolean terms.

Interactive Demo 3.3: Solving XOR

Here we use a famous open-source visualization widget developed by the Tensorflow team, available [here](https://github.com/tensorflow/playground).
* Play with the widget and observe that you can not solve the continuous XOR dataset.
* Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly.

For the second part, you should set the weights by clicking on the connections and either typing the value or using the up and down keys to change it by one increment. You can do the same for the biases by clicking on the tiny square to each neuron's bottom left.

Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is:

\begin{equation} y = f(x_1)+f(x_2)-f(x_1+x_2)\end{equation}

Try to set the weights and biases to implement this function after you have played enough :)
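Before (or after) playing with the widget, here is a minimal sketch verifying that $y = f(x_1)+f(x_2)-f(x_1+x_2)$ with $f$ = ReLU behaves like XOR. Note that the playground's continuous XOR dataset encodes the two input classes as negative/positive values rather than 0/1, so we evaluate at $\pm 1$: the output is positive exactly when the two inputs have opposite signs.

```python
import torch.nn.functional as F

def xor_net(x1, x2):
  # y = f(x1) + f(x2) - f(x1 + x2), with f = ReLU
  return F.relu(x1) + F.relu(x2) - F.relu(x1 + x2)

for x1 in (-1.0, 1.0):
  for x2 in (-1.0, 1.0):
    y = xor_net(torch.tensor(x1), torch.tensor(x2))
    print(f"x1={x1:+.0f}, x2={x2:+.0f} -> y={y.item():.0f}")
# prints y=0 when the signs match and y=1 when they differ
```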
###Code
# @markdown ###Play with the parameters to solve XOR
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor®Dataset=reg-plane&learningRate=0.03®ularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
# @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
###Output
_____no_output_____
###Markdown
--- Section 4: Ethics And Course Info
###Code
# @title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 17: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Meet our lecturers:

Week 1: the building blocks
* [Konrad Kording](https://kordinglab.com)
* [Andrew Saxe](https://www.saxelab.org/)
* [Surya Ganguli](https://ganguli-gang.stanford.edu/)
* [Ioannis Mitliagkas](http://mitliagkas.github.io/)
* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/)

Week 2: making things work
* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)
* [Alexander Ecker](https://eckerlab.org/)
* [James Evans](https://sociology.uchicago.edu/directory/james-evans)
* [He He](https://hhexiy.github.io/)
* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/)

Week 3: more magic
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Jane Wang](http://www.janexwang.com/) and [Feryal Behbahani](https://feryal.github.io/)
* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)
* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lomonaco](https://www.vincenzolomonaco.com/)

Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map?

--- Submit to Airtable
###Code
# @title Video 18: Submission info
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This is Darryl, the Deep Learning Dapper Lion, and he's here to teach you about content submission to Airtable. At the end of each tutorial there will be an Airtable Submission Cell. Run the cell to generate the Airtable submission button and click on it to submit your information to Airtable. If it is the last tutorial of the day, your button will also take you to the end-of-day survey; otherwise it will simply submit your data.

It is critical that you push the submit button for every tutorial you run. Even if you don't finish the tutorial, still submit! Submitting is the only way we can verify that you attempted each tutorial, which is critical for us to be able to track your progress.

TL;DR: Basic tutorial workflow
1. Work through the tutorial, answering Think! questions and code exercises.
2. At the end of each tutorial (even if the tutorial is incomplete), run the Airtable submission code cell.
3. Push the submission button.
4. If it is the last tutorial of the day, the submission button will also take you to the end-of-day survey on a new page. Complete that and submit it.

Submission FAQs:
1. What if I want to change my answers to previous discussion questions?
> You are free to change and resubmit any of the answers and Think! questions as many times as you like. However, please only run the Airtable submission code and click on the link once you are ready to submit.
2. Okay, but what if I submitted my Airtable anyway and really want to resubmit?
> After making changes, you can re-run the Airtable submission code cell. This will result in a second submission from you for the data. This will make Darryl sad, as it will be more work for him to clean up the data later.
3. HELP! I accidentally ran the code to generate the Airtable submission button before I was ready to submit! What do I do?
> If you run the code to generate the link, anything that happens afterwards will not be captured. Complete the tutorial and make sure to re-run the Airtable submission cell when you are finished, before pressing the submission button.
4. What if I want to work on this on my own later; should I wait to submit until I'm finished?
> Please submit wherever you are at the end of the day. It's great that you want to keep working on this, but it's important for us to see the places where we tried things that didn't quite work out, so we can fix them for next year.

Finally, we try to keep the Airtable code as hidden as possible, but if you ever see any calls to `atform`, such as `atform.add_event()`, in the coding exercises, just know that they are for saving Airtable information only. They will not affect the code that is being run around them in any way, so please do not modify, comment out, or worry about any of those lines of code.

Now, let's try submitting today's course to Airtable by running the next cell and clicking the button when it appears.
###Code
# @title Airtable Submission Link
from IPython import display
display.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link to survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
--- Bonus - 60 years of Machine Learning Research in one Plot

by [Hendrik Strobelt](http://hendrik.strobelt.com) (MIT-IBM Watson AI Lab) with support from Benjamin Hoover.

In this notebook we visualize a subset* of 3,300 articles retrieved from the AllenAI [S2ORC dataset](https://github.com/allenai/s2orc). We represent each paper by a position that is the output of a dimensionality reduction method applied to a vector representation of each paper. The vector representation is the output of a neural network.

*The selection is strongly biased by the keywords and methodology we used to filter. Please see the details section to learn about what we did.
###Code
# @title Import `altair` and load the data
!pip install altair vega_datasets --quiet
import requests
import altair as alt # altair is defining data visualizations
# Source data files
# Position data file maps ID to x,y positions
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc.pos_umap_cosine_100_d0.1.json
POS_FILE = 'https://osf.io/qyrfn/download'
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc_clean.csv
# Metadata file maps ID to title, abstract, author,....
META_FILE = 'https://osf.io/vfdu6/download'
# data loading and wrangling
def load_data():
positions = pd.read_json(POS_FILE)
positions[['x', 'y']] = positions['pos'].to_list()
meta = pd.read_csv(META_FILE)
return positions.merge(meta, left_on='id', right_on='paper_id')
# load data
data = load_data()
# @title Define Visualization using Altair
YEAR_PERIOD = "quinquennial" # @param
selection = alt.selection_multi(fields=[YEAR_PERIOD], bind='legend')
data[YEAR_PERIOD] = (data["year"] / 5.0).apply(np.floor) * 5
chart = alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count"]], width=800,
height=800).mark_circle(radius=2, opacity=0.2).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False, clamp=True, domain=list(range(1955,2020,5))),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
# size='citation_count',
# color="decade:O",
opacity=alt.condition(selection, alt.value(.8), alt.value(0.2)),
).add_selection(
selection
).interactive()
###Output
_____no_output_____
###Markdown
Let's look at the visualization. Each dot represents one paper. Close dots mean that the respective papers are more closely related than distant ones. The color indicates the 5-year period in which the paper was published. The dot size indicates the citation count (within the S2ORC corpus) as of July 2020. The view is **interactive** and allows for three main interactions. Try them and play around.
1. Hover over a dot to see a tooltip (title, author).
2. Select a year in the legend (right) to filter dots.
3. Zoom in/out with scroll -- double click resets the view.
###Code
chart
###Output
_____no_output_____
###Markdown
Questions

By playing around, can you find some answers to the following questions?
1. Can you find topical clusters? What cluster might occur because of a filtering error?
2. Can you see a temporal trend in the data and clusters?
3. Can you determine when deep learning methods started booming?
4. Can you find the key papers that were written before the DL "winter" that define milestones for a cluster? (tip: look for large dots of a different color)

Methods

Here is what we did:
1. Filtering of all papers that fulfilled the criteria:
   - are categorized as `Computer Science` or `Mathematics`
   - one of the following keywords appearing in title or abstract: `"machine learning|artificial intelligence|neural network|(machine|computer) vision|perceptron|network architecture| RNN | CNN | LSTM | BLEU | MNIST | CIFAR |reinforcement learning|gradient descent| Imagenet "`
2. Per year, remove all papers that are below the 99th percentile of citation count in that year
3. Embed each paper using abstract+title in the SPECTER model
4. Project the embeddings using UMAP
5. Visualize using Altair

Find Authors
###Code
# @title Edit the `AUTHOR_FILTER` variable to full text search for authors.
AUTHOR_FILTER = "Rush " # @param space at the end means "word border"
### Don't ignore case when searching...
FLAGS = 0
### uncomment do ignore case
# FLAGS = re.IGNORECASE
## --- FILTER CODE.. make it your own ---
import re
data['issel'] = data['authors'].str.contains(AUTHOR_FILTER, na=False, flags=FLAGS, )
if data['issel'].mean()<0.0000000001:
print('No match found')
## --- FROM HERE ON VIS CODE ---
alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count", "issel"]], width=800,
height=800) \
.mark_circle(stroke="black", strokeOpacity=1).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.StrokeWidth('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[0, 2]), legend=None),
alt.Opacity('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[.2, 1]), legend=None),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
).interactive()
###Output
_____no_output_____
###Markdown
Tutorial 1: PyTorch

**Week 1, Day 1: Basics and PyTorch**

**By Neuromatch Academy**

__Content creators:__ Shubh Pachchigar, Vladimir Haltakov, Matthew Sargent, Konrad Kording

__Content reviewers:__ Kelson Shilling-Scrivo, Deepak Raya, Siwei Bai

__Content editors:__ Anoop Kulkarni, Spiros Chavlis

__Production editors:__ Arush Tagade, Spiros Chavlis

**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**

--- Tutorial Objectives

We have a few specific objectives for this tutorial:
* Learn about PyTorch and tensors
* Tensor manipulations
* Data loading
* GPUs and CUDA tensors
* Train NaiveNet
* Get to know your pod
* Start thinking about the course as a whole
###Code
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/wcjrv/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
###Output
_____no_output_____
###Markdown
--- Setup

Throughout your Neuromatch tutorials, most (probably all!) notebooks contain setup cells. These cells will import the required Python packages (e.g., PyTorch, NumPy), set global or environment variables, and load in helper functions for things like plotting. In some tutorials, you will notice that we install some dependencies even if they are preinstalled on Google Colab or Kaggle. This happens because we have added automation to our repository through [GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/introduction-to-github-actions).

Be sure to run all of the cells in the setup section. Feel free to expand them and have a look at what you are loading in, but you should be able to fulfill the learning objectives of every tutorial without having to look at these cells.

If you start building your own projects on top of this code base, we highly recommend looking at them in more detail.
###Code
# @title Install dependencies
!pip install pandas --quiet
!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
# Imports
import time
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch import nn
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
# @title Figure Settings
import ipywidgets as widgets
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# @title Helper Functions
atform = AirtableForm('appn7VdPRseSoMXEG','W1D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/97e94a29-0b3a-4e16-9a8d-f6838a5bd83d')
def checkExercise1(A, B, C, D):
"""
Helper function for checking exercise.
Args:
A: torch.Tensor
B: torch.Tensor
C: torch.Tensor
D: torch.Tensor
Returns:
Nothing.
"""
errors = []
# TODO better errors and error handling
if not torch.equal(A.to(int),torch.ones(20, 21).to(int)):
errors.append(f"Got: {A} \n Expected: {torch.ones(20, 21)} (shape: {torch.ones(20, 21).shape})")
if not np.array_equal( B.numpy(),np.vander([1, 2, 3], 4)):
errors.append("B is not a tensor containing the elements of Z ")
if C.shape != (20, 21):
errors.append("C is not the correct shape ")
if not torch.equal(D, torch.arange(4, 41, step=2)):
errors.append("D does not contain the correct elements")
if errors == []:
print("All correct!")
else:
[print(e) for e in errors]
def timeFun(f, dim, iterations, device='cpu'):
  t_total = 0
  for _ in range(iterations):
    start = time.time()
    f(dim, device)
    end = time.time()
    t_total += end - start
  # Note: CUDA kernels launch asynchronously, so wall-clock timings on the GPU
  # are only a rough estimate.
  print(f"time taken for {iterations} iterations of {f.__name__}({dim}, {device}): {t_total:.5f}")
###Output
_____no_output_____
###Markdown
**Important note: Google Colab users**

*Scratch Code Cells*

If you want to quickly try out something or take a look at the data, you can use scratch code cells. They allow you to run Python code, but will not mess up the structure of your notebook. To open a new scratch cell, go to *Insert* → *Scratch code cell*.

Section 1: Welcome to the Neuromatch Deep Learning course
###Code
# @title Video 1: Welcome and History
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Av411n7oL", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ca21SNqt78I", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing
atform.add_event('Video 1: Welcome and History')
display(out)
###Output
_____no_output_____
###Markdown
This will be an intensive 3-week adventure. We will all learn Deep Learning. In a group. Groups need standards. Read our [Code of Conduct](https://docs.google.com/document/d/1eHKIkaNbAlbx_92tLQelXnicKXEcvFzlyzzeWjEtifM/edit?usp=sharing).
###Code
# @title Video 2: Why DL is cool
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1gf4y1j7UZ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"l-K6495BN-4", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 2: Why DL is cool')
display(out)
###Output
_____no_output_____
###Markdown
**Describe what you hope to get out of this course in about 100 words.**

--- Section 2: The Basics of PyTorch

PyTorch is a Python-based scientific computing package targeted at two sets of audiences:
- A replacement for NumPy to use the power of GPUs
- A deep learning platform that provides significant flexibility and speed

At its core, PyTorch provides a few key features:
- A multidimensional [Tensor](https://pytorch.org/docs/stable/tensors.html) object, similar to [NumPy Array](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) but with GPU acceleration.
- An optimized **autograd** engine for automatically computing derivatives (a minimal sketch follows below).
- A clean, modular API for building and deploying **deep learning models**.

You can find more information about PyTorch in the appendix.

Section 2.1: Creating Tensors
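Before diving into tensor creation, here is a minimal sketch of the autograd engine mentioned above (assuming only the `torch` import from the setup section):

```python
# Autograd in three lines: build a computation, call backward(), read the gradient
x = torch.tensor([2.0, 3.0], requires_grad=True)
y = (x ** 2).sum()  # y = x1^2 + x2^2
y.backward()        # automatically computes dy/dx
print(x.grad)       # tensor([4., 6.]), since dy/dxi = 2 * xi
```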
###Code
# @title Video 3: Making Tensors
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Rw411d7Uy", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"jGKd_4tPGrw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 3: Making Tensors')
display(out)
###Output
_____no_output_____
###Markdown
There are various ways of creating tensors, and when doing any real deep learning project we will usually have to do so. **Construct tensors directly:**---
###Code
# we can construct a tensor directly from some common python iterables,
# such as list and tuple nested iterables can also be handled as long as the
# dimensions make sense
# tensor from a list
a = torch.tensor([0, 1, 2])
#tensor from a tuple of tuples
b = ((1.0, 1.1), (1.2, 1.3))
b = torch.tensor(b)
# tensor from a numpy array
c = np.ones([2, 3])
c = torch.tensor(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
###Output
_____no_output_____
###Markdown
**Some common tensor constructors:**---
###Code
# the numerical arguments we pass to these constructors
# determine the shape of the output tensor
x = torch.ones(5, 3)
y = torch.zeros(2)
z = torch.empty(1, 1, 5)
print(f"Tensor x: {x}")
print(f"Tensor y: {y}")
print(f"Tensor z: {z}")
###Output
_____no_output_____
###Markdown
Notice that ```.empty()``` does not return zeros, but seemingly random small numbers. Unlike ```.zeros()```, which initialises the elements of the tensor with zeros, ```.empty()``` just allocates the memory. It is hence a bit faster if you are looking to just create a tensor (a quick timing sketch follows below). **Creating random tensors and tensors like other tensors:**---
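Here is a quick and rough way to see the `.empty()` vs `.zeros()` speed difference claimed above (a sketch; the exact numbers depend on your machine and on allocator caching, so the gap may be small):

```python
import timeit

# .empty() only allocates memory; .zeros() also writes zeros into it
print(timeit.timeit(lambda: torch.empty(1000, 1000), number=100))
print(timeit.timeit(lambda: torch.zeros(1000, 1000), number=100))
```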
###Code
# there are also constructors for random numbers
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
# there are also constructors that allow us to construct
# a tensor according to the above constructors, but with
# dimensions equal to another tensor
c = torch.zeros_like(a)
d = torch.rand_like(c)
print(f"Tensor a: {a}")
print(f"Tensor b: {b}")
print(f"Tensor c: {c}")
print(f"Tensor d: {d}")
###Output
_____no_output_____
###Markdown
*Reproducibility*:

- PyTorch random number generator: You can use `torch.manual_seed()` to seed the RNG for all devices (both CPU and CUDA):

```python
import torch
torch.manual_seed(0)
```

- For custom operators, you might need to set the Python seed as well:

```python
import random
random.seed(0)
```

- Random number generators in other libraries:

```python
import numpy as np
np.random.seed(0)
```

Here, we define for you a function called `set_seed` that does the job for you!
###Code
def set_seed(seed=None, seed_torch=True):
"""
Function that controls randomness. NumPy and random modules must be imported.
Args:
seed : Integer
A non-negative integer that defines the random state. Default is `None`.
seed_torch : Boolean
If `True` sets the random seed for pytorch tensors, so pytorch module
must be imported. Default is `True`.
Returns:
Nothing.
"""
if seed is None:
seed = np.random.choice(2 ** 32)
random.seed(seed)
np.random.seed(seed)
if seed_torch:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
print(f'Random seed {seed} has been set.')
###Output
_____no_output_____
###Markdown
Now, let's use the `set_seed` function in the previous example. Execute the cell multiple times to verify that the numbers printed are always the same.
###Code
def simplefun(seed=True, my_seed=None):
if seed:
set_seed(seed=my_seed)
# uniform distribution
a = torch.rand(1, 3)
# normal distribution
b = torch.randn(3, 4)
print("Tensor a: ", a)
print("Tensor b: ", b)
simplefun(seed=True, my_seed=0) # Turn `seed` to `False` or change `my_seed`
###Output
_____no_output_____
###Markdown
**Numpy-like number ranges:**---The ```.arange()``` and ```.linspace()``` functions behave as you would expect them to if you are familiar with numpy.
###Code
a = torch.arange(0, 10, step=1)
b = np.arange(0, 10, step=1)
c = torch.linspace(0, 5, steps=11)
d = np.linspace(0, 5, num=11)
print(f"Tensor a: {a}\n")
print(f"Numpy array b: {b}\n")
print(f"Tensor c: {c}\n")
print(f"Numpy array d: {d}\n")
###Output
_____no_output_____
###Markdown
Coding Exercise 2.1: Creating TensorsBelow you will find some incomplete code. Fill in the missing code to construct the specified tensors.We want the tensors: $A:$ 20 by 21 tensor consisting of ones$B:$ a tensor with elements equal to the elements of numpy array $Z$$C:$ a tensor with the same number of elements as $A$ but with values $\sim U(0,1)$$D:$ a 1D tensor containing the even numbers between 4 and 40 inclusive.
###Code
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of any shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
#################################################
## TODO for students: fill in the missing code
## to create the tensors described above
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
#################################################
A = ...
B = ...
C = ...
D = ...
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
# A, B, C, D = tensor_creation(Z)
# checkExercise1(A, B, C, D)
# to_remove solution
def tensor_creation(Z):
"""A function that creates various tensors.
Args:
Z (numpy.ndarray): An array of any shape
Returns:
A : 20 by 21 tensor consisting of ones
B : a tensor with elements equal to the elements of numpy array Z
C : a tensor with the same number of elements as A but with values ∼U(0,1)
D : a 1D tensor containing the even numbers between 4 and 40 inclusive.
"""
A = torch.ones(20, 21)
B = torch.tensor(Z)
C = torch.rand_like(A)
D = torch.arange(4, 41, step=2)
return A, B, C, D
# add timing to airtable
atform.add_event('Coding Exercise 2.1: Creating Tensors')
# numpy array to copy later
Z = np.vander([1, 2, 3], 4)
# Uncomment below to check your function!
A, B, C, D = tensor_creation(Z)
checkExercise1(A, B, C, D)
###Output
_____no_output_____
###Markdown
```All correct!``` Section 2.2: Operations in PyTorch**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
# @title Video 4: Tensor Operators
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1G44y127As", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"R1R8VoYXBVA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 4: Tensor Operators')
display(out)
###Output
_____no_output_____
###Markdown
**Tensor-Tensor operations**We can perform operations on tensors using methods under ```torch.```
###Code
a = torch.ones(5, 3)
b = torch.rand(5, 3)
c = torch.empty(5, 3)
d = torch.empty(5, 3)
# this only works if c and d already exist
torch.add(a, b, out=c)
#Pointwise Multiplication of a and b
torch.multiply(a, b, out=d)
print(c)
print(d)
###Output
_____no_output_____
###Markdown
However, in PyTorch most common Python operators are overridden. The common standard arithmetic operators (+, -, *, /, and **) have all been lifted to elementwise operations.
###Code
x = torch.tensor([1, 2, 4, 8])
y = torch.tensor([1, 2, 3, 4])
x + y, x - y, x * y, x / y, x**y # The ** operator is exponentiation
###Output
_____no_output_____
###Markdown
**Tensor Methods** Tensors also have a number of common arithmetic operations built in. A full list of **all** methods can be found in the appendix (there are a lot!) All of these operations should have similar syntax to their numpy equivalents.(Feel free to skip if you already know this!)
###Code
x = torch.rand(3, 3)
print(x)
print("\n")
# sum() - note the axis is the axis you move across when summing
print(f"Sum of every element of x: {x.sum()}")
print(f"Sum of the columns of x: {x.sum(axis=0)}")
print(f"Sum of the rows of x: {x.sum(axis=1)}")
print("\n")
print(f"Mean value of all elements of x {x.mean()}")
print(f"Mean values of the columns of x {x.mean(axis=0)}")
print(f"Mean values of the rows of x {x.mean(axis=1)}")
###Output
_____no_output_____
###Markdown
**Matrix Operations**The ```@``` symbol is overridden to represent matrix multiplication. You can also use ```torch.matmul()``` to multiply tensors. For dot multiplication, you can use ```torch.dot()```, or manipulate the axes of your tensors and do matrix multiplication (we will cover that in the next section). Transposes of 2D tensors are obtained using ```torch.t()``` or ```Tensor.T```. Note the lack of brackets for ```Tensor.T``` - it is an attribute, not a method (a short demonstration follows below). Coding Exercise 2.2 : Simple tensor operationsBelow are two expressions involving operations on matrices. $$ \textbf{A} = \begin{bmatrix}2 &4 \\5 & 7 \end{bmatrix} \begin{bmatrix} 1 &1 \\2 & 3\end{bmatrix} + \begin{bmatrix}10 & 10 \\ 12 & 1 \end{bmatrix} $$and$$ b = \begin{bmatrix} 3 \\ 5 \\ 7\end{bmatrix} \cdot \begin{bmatrix} 2 \\ 4 \\ 8\end{bmatrix}$$The code block below computes these expressions using PyTorch but is incomplete - fill in the missing lines.
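Before the exercise, a quick demonstration of the matrix product and the transpose (a minimal sketch; the values are illustrative):
```python
import torch

M = torch.arange(6.).reshape(2, 3)

# `@` and `torch.matmul()` compute the same matrix product
print(torch.equal(M @ M.T, torch.matmul(M, M.T)))  # True

# three equivalent ways to transpose a 2D tensor
print(torch.t(M).shape, M.t().shape, M.T.shape)  # all torch.Size([3, 2])
```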
###Code
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
################################################
## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
################################################
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = ...
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
# A = simple_operations(a1, a2, a3)
# print(A)
# to_remove solution
def simple_operations(a1: torch.Tensor, a2: torch.Tensor, a3: torch.Tensor):
# multiplication of tensor a1 with tensor a2 and then add it with tensor a3
answer = a1 @ a2 + a3
return answer
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-simple_operations')
# Computing expression 1:
# init our tensors
a1 = torch.tensor([[2, 4], [5, 7]])
a2 = torch.tensor([[1, 1], [2, 3]])
a3 = torch.tensor([[10, 10], [12, 1]])
## uncomment to test your function
A = simple_operations(a1, a2, a3)
print(A)
###Output
_____no_output_____
###Markdown
```tensor([[20, 24], [31, 27]])```
###Code
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
###############################################
## TODO for students: complete the first computation using the argument matrices
raise NotImplementedError("Student exercise: fill in the missing code to complete the operation")
###############################################
# Use torch.dot() to compute the dot product of two tensors
product = ...
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
# b = dot_product(b1, b2)
# print(b)
# to_remove solution
def dot_product(b1: torch.Tensor, b2: torch.Tensor):
# Use torch.dot() to compute the dot product of two tensors
product = torch.dot(b1, b2)
return product
# add timing to airtable
atform.add_event('Coding Exercise 2.2 : Simple tensor operations-dot_product')
# Computing expression 2:
b1 = torch.tensor([3, 5, 7])
b2 = torch.tensor([2, 4, 8])
## Uncomment to test your function
b = dot_product(b1, b2)
print(b)
###Output
_____no_output_____
###Markdown
```tensor(82)``` Section 2.3: Manipulating Tensors in PyTorch
###Code
# @title Video 5: Tensor Indexing
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1BM4y1K7pD", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"0d0KSJ3lJbg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 5: Tensor Indexing')
display(out)
###Output
_____no_output_____
###Markdown
**Indexing**Just as in numpy, elements in a tensor can be accessed by index. As in any numpy array, the first element has index 0 and ranges are specified to include the first element but exclude the last. We can access elements according to their relative position to the end of the list by using negative indices. Selecting a range of elements is also referred to as slicing. For example, [-1] selects the last element; [1:3] selects the second and the third elements, and [:-2] will select all elements excluding the last and second-to-last elements.
###Code
x = torch.arange(0, 10)
print(x)
print(x[-1])
print(x[1:3])
print(x[:-2])
###Output
_____no_output_____
###Markdown
When we have multidimensional tensors, indexing rules work the same way as numpy.
###Code
# make a 5D tensor
x = torch.rand(1, 2, 3, 4, 5)
print(f" shape of x[0]:{x[0].shape}")
print(f" shape of x[0][0]:{x[0][0].shape}")
print(f" shape of x[0][0][0]:{x[0][0][0].shape}")
###Output
_____no_output_____
###Markdown
**Flatten and reshape**There are various methods for reshaping tensors. It is common to have to express 2D data in 1D format. Similarly, it is also common to have to reshape a 1D tensor into a 2D tensor. We can achieve this with the ```.flatten()``` and ```.reshape()``` methods.
###Code
z = torch.arange(12).reshape(6, 2)
print(f"Original z: \n {z}")
# 2D -> 1D
z = z.flatten()
print(f"Flattened z: \n {z}")
# and back to 2D
z = z.reshape(3, 4)
print(f"Reshaped (3x4) z: \n {z}")
###Output
_____no_output_____
###Markdown
You will also see the ```.view()``` method used a lot to reshape tensors. There is a subtle difference between ```.view()``` and ```.reshape()```, though for now we will just use ```.reshape()```. The documentation can be found in the appendix, and a short sketch of the difference follows below. **Squeezing tensors**When processing batches of data, you will quite often be left with singleton dimensions, e.g., [1, 10] or [256, 1, 3]. This dimension can quite easily mess up your matrix operations if you don't plan on it being there... In order to compress tensors along their singleton dimensions we can use the ```.squeeze()``` method. We can use the ```.unsqueeze()``` method to do the opposite.
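Returning to the `.view()` vs `.reshape()` note above, here is a minimal sketch of the difference: `.view()` requires a compatible (contiguous) memory layout, while `.reshape()` silently copies the data when it has to:
```python
import torch

x = torch.arange(6).reshape(2, 3)
print(x.view(3, 2).shape)  # works: x is contiguous in memory

y = x.t()                  # transposing gives a non-contiguous view
print(y.reshape(6).shape)  # works: reshape copies when it must
# y.view(6)                # would raise a RuntimeError here
```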
###Code
x = torch.randn(1, 10)
# printing the zeroth element of the tensor will not give us the first number!
print(x.shape)
print(f"x[0]: {x[0]}")
###Output
_____no_output_____
###Markdown
Because of that pesky singleton dimension, x[0] gave us the first row instead!
###Code
# lets get rid of that singleton dimension and see what happens now
x = x.squeeze(0)
print(x.shape)
print(f"x[0]: {x[0]}")
# adding singleton dimensions works a similar way, and is often used when tensors
# being added need same number of dimensions
y = torch.randn(5, 5)
print(f"shape of y: {y.shape}")
# lets insert a singleton dimension
y = y.unsqueeze(1)
print(f"shape of y: {y.shape}")
###Output
_____no_output_____
###Markdown
**Permutation**Sometimes our dimensions will be in the wrong order! For example, we may be dealing with RGB images with dim [3x48x64], but our pipeline expects the colour dimension to be the last dimension i.e. [48x64x3]. To get around this we can use ```.permute()```
###Code
# `x` has dimensions [color,image_height,image_width]
x = torch.rand(3, 48, 64)
# we want to permute our tensor to be [ image_height , image_width , color ]
x = x.permute(1, 2, 0)
# permute(1,2,0) means:
# the 0th dim of my new tensor = the 1st dim of my old tensor
# the 1st dim of my new tensor = the 2nd
# the 2nd dim of my new tensor = the 0th
print(x.shape)
###Output
_____no_output_____
###Markdown
You may also see ```.transpose()``` used. This works similarly to permute, but can only swap two dimensions at once; a short sketch follows below. **Concatenation** In this example, we concatenate two matrices along rows (axis 0, the first element of the shape) vs. columns (axis 1, the second element of the shape). We can see that the first output tensor's axis-0 length (6) is the sum of the two input tensors' axis-0 lengths (3+3), while the second output tensor's axis-1 length (8) is the sum of the two input tensors' axis-1 lengths (4+4).
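First, the promised sketch of `.transpose()` (a minimal example; the shapes are illustrative):
```python
import torch

x = torch.rand(3, 48, 64)
# swap dimensions 0 and 2 with transpose
print(x.transpose(0, 2).shape)   # torch.Size([64, 48, 3])
# permute can express the same swap (and any other reordering)
print(x.permute(2, 1, 0).shape)  # torch.Size([64, 48, 3])
```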
###Code
# Create two tensors of the same shape
x = torch.arange(12, dtype=torch.float32).reshape((3, 4))
y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
#concatenate them along rows
cat_rows = torch.cat((x, y), dim=0)
# concatenate along columns
cat_cols = torch.cat((x, y), dim=1)
# printing outputs
print('Concatenated by rows: shape{} \n {}'.format(list(cat_rows.shape), cat_rows))
print('\n Concatenated by columns: shape{} \n {}'.format(list(cat_cols.shape), cat_cols))
###Output
_____no_output_____
###Markdown
**Conversion to Other Python Objects**Converting a tensor to a NumPy array, or vice versa, is easy. Be careful about which conversions share memory: on the CPU, `Tensor.numpy()` returns an array that shares its storage with the tensor, so an in-place change to one is visible in the other, whereas `torch.tensor(array)` always copies the data. When converting to a numpy array, the information being tracked by the tensor will be lost, i.e., the computational graph. This will be covered in detail when you are introduced to autograd tomorrow!
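A quick check of this sharing behaviour (a minimal sketch):
```python
import torch

x = torch.zeros(3)
y = x.numpy()        # shares memory with the CPU tensor x
z = torch.tensor(y)  # copies the data

x.add_(1)            # modify x in place
print(y)             # [1. 1. 1.] -- y reflects the change
print(z)             # tensor([0., 0., 0.]) -- z does not
```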
###Code
x = torch.randn(5)
print(f"x: {x} | x type: {x.type()}")
y = x.numpy()
print(f"y: {y} | y type: {type(y)}")
z = torch.tensor(y)
print(f"z: {z} | z type: {z.type()}")
###Output
_____no_output_____
###Markdown
To convert a size-1 tensor to a Python scalar, we can invoke the `item()` method or Python's built-in functions such as `float` and `int`.
###Code
a = torch.tensor([3.5])
a, a.item(), float(a), int(a)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.3: Manipulating TensorsUsing a combination of the methods discussed above, complete the functions below. **Function A** This function takes in two 2D tensors $A$ and $B$ and returns the column sum of $A$ multiplied by the sum of all the elements of $B$, i.e., a scalar, e.g.,:$ A = \begin{bmatrix}1 & 1 \\1 & 1 \end{bmatrix} \,$and$ B = \begin{bmatrix}1 & 2 & 3\\1 & 2 & 3 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix} 2 & 2 \\\end{bmatrix} \cdot 12 = \begin{bmatrix}24 & 24\\\end{bmatrix}$**Function B** This function takes in a square matrix $C$ and returns a 2D tensor consisting of a flattened $C$ with the index of each element appended to this tensor in the row dimension, e.g.,:$ C = \begin{bmatrix}2 & 3 \\-1 & 10 \end{bmatrix} \,$so$ \, Out = \begin{bmatrix}0 & 2 \\1 & 3 \\2 & -1 \\3 & 10\end{bmatrix}$**Hint:** pay close attention to singleton dimensions**Function C**This function takes in two 2D tensors $D$ and $E$. If the dimensions allow it, this function returns the elementwise sum of $D$-shaped $E$, and $D$; else this function returns a 1D tensor that is the concatenation of the two tensors, e.g.,:$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix} \,$and $ E = \begin{bmatrix}2 & 3 & 0 & 2 \\\end{bmatrix} \, $so$ \, Out = \begin{bmatrix}3 & 2 \\-1 & 5 \end{bmatrix}$$ D = \begin{bmatrix}1 & -1 \\-1 & 3 \end{bmatrix}$and$ \, E = \begin{bmatrix}2 & 3 & 0 \\\end{bmatrix} \,$so$ \, Out = \begin{bmatrix}1 & -1 & -1 & 3 & 2 & 3 & 0 \end{bmatrix}$**Hint:** `torch.numel()` is an easy way of finding the number of elements in a tensor
###Code
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
################################################
## TODO for students: complete functionA
raise NotImplementedError("Student exercise: complete function A")
################################################
# TODO multiply the column sum of `my_tensor1` by the sum of `my_tensor2`
output = ...
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionB
raise NotImplementedError("Student exercise: complete function B")
################################################
# TODO flatten the tensor `my_tensor`
my_tensor = ...
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = ...
# TODO concatenate the two tensors
output = ...
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
################################################
## TODO for students: complete functionC
raise NotImplementedError("Student exercise: complete function C")
################################################
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if ...:
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = ...
# TODO sum the two tensors
output = ...
else:
# TODO flatten both tensors
my_tensor1 = ...
my_tensor2 = ...
# TODO concatenate the two tensors in the correct dimension
output = ...
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
# print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
# print(functionB(torch.tensor([[2, 3], [-1, 10]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
# print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
# to_remove solution
def functionA(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`
and returns the column sum of
`my_tensor1` multiplied by the sum of all the elements of `my_tensor2`,
i.e., a scalar.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
The multiplication of the column sum of `my_tensor1` by the sum of
`my_tensor2`.
"""
# TODO multiply the column sum of `my_tensor1` by the sum of `my_tensor2`
output = my_tensor1.sum(axis=0) * my_tensor2.sum()
return output
def functionB(my_tensor):
"""
This function takes in a square matrix `my_tensor` and returns a 2D tensor
consisting of a flattened `my_tensor` with the index of each element
appended to this tensor in the row dimension.
Args:
my_tensor: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO flatten the tensor `my_tensor`
my_tensor = my_tensor.flatten()
# TODO create the idx tensor to be concatenated to `my_tensor`
idx_tensor = torch.arange(0, len(my_tensor))
# TODO concatenate the two tensors
output = torch.cat([idx_tensor.unsqueeze(1), my_tensor.unsqueeze(1)], axis=1)
return output
def functionC(my_tensor1, my_tensor2):
"""
This function takes in two 2D tensors `my_tensor1` and `my_tensor2`.
If the dimensions allow it, it returns the
elementwise sum of `my_tensor1`-shaped `my_tensor2`, and `my_tensor1`;
else this function returns a 1D tensor that is the concatenation of the
two tensors.
Args:
my_tensor1: torch.Tensor
my_tensor2: torch.Tensor
Returns:
output: torch.Tensor
Concatenated tensor.
"""
# TODO check we can reshape `my_tensor2` into the shape of `my_tensor1`
if torch.numel(my_tensor1) == torch.numel(my_tensor2):
# TODO reshape `my_tensor2` into the shape of `my_tensor1`
my_tensor2 = my_tensor2.reshape(my_tensor1.shape)
# TODO sum the two tensors
output = my_tensor1 + my_tensor2
else:
# TODO flatten both tensors
my_tensor1 = my_tensor1.reshape(1, -1)
my_tensor2 = my_tensor2.reshape(1, -1)
# TODO concatenate the two tensors in the correct dimension
output = torch.cat([my_tensor1, my_tensor2], axis=1).squeeze()
return output
# add timing to airtable
atform.add_event('Coding Exercise 2.3: Manipulating Tensors')
## Implement the functions above and then uncomment the following lines to test your code
print(functionA(torch.tensor([[1, 1], [1, 1]]), torch.tensor([[1, 2, 3], [1, 2, 3]])))
print(functionB(torch.tensor([[2, 3], [-1, 10]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0, 2]])))
print(functionC(torch.tensor([[1, -1], [-1, 3]]), torch.tensor([[2, 3, 0]])))
###Output
_____no_output_____
###Markdown
```
tensor([24, 24])
tensor([[ 0,  2],
        [ 1,  3],
        [ 2, -1],
        [ 3, 10]])
tensor([[ 3,  2],
        [-1,  5]])
tensor([ 1, -1, -1,  3,  2,  3,  0])
```
Section 2.4: GPUs
###Code
# @title Video 6: GPU vs CPU
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1nM4y1K7qx", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"9Mc9GFUtILY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 6: GPU vs CPU')
display(out)
###Output
_____no_output_____
###Markdown
By default, when we create a tensor it will *not* live on the GPU!
###Code
x = torch.randn(10)
print(x.device)
###Output
_____no_output_____
###Markdown
Colab notebooks, by default, do not have access to a GPU. In order to start using GPUs we need to request one. We can do this by going to the runtime tab at the top of the page. By following Runtime -> Change runtime type and selecting "GPU" from the Hardware Accelerator dropdown list, we can start playing with sending tensors to GPUs. Once you have done this your runtime will restart and you will need to rerun the first setup cell to reimport PyTorch. Then proceed to the next cell. (For more information on the GPU usage policy, see the appendix.) **Now we have a GPU** The cell below should return True.
###Code
print(torch.cuda.is_available())
###Output
_____no_output_____
###Markdown
CUDA is an API developed by Nvidia for interfacing with GPUs. PyTorch provides us with a layer of abstraction, and allows us to launch CUDA kernels using pure Python. In short, we get the power of parallelizing our tensor computations on GPUs, whilst only writing (relatively) simple Python!Here, we define the function `set_device`, which returns the device used in the notebook, i.e., `cpu` or `cuda`. Unless otherwise specified, we use this function at the top of every tutorial, and we store the device variable like this:
```python
DEVICE = set_device()
```
Let's define the function using the PyTorch package `torch.cuda`, which is lazily initialized, so we can always import it, and use `is_available()` to determine if our system supports CUDA.
###Code
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
###Output
_____no_output_____
###Markdown
Let's make some CUDA tensors!
###Code
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
DEVICE = set_device()
# we can specify a device when we first create our tensor
x = torch.randn(2, 2, device=DEVICE)
print(x.dtype)
print(x.device)
# we can also use the .to() method to change the device a tensor lives on
y = torch.randn(2, 2)
print(f"y before calling to() | device: {y.device} | dtype: {y.type()}")
y = y.to(DEVICE)
print(f"y after calling to() | device: {y.device} | dtype: {y.type()}")
###Output
_____no_output_____
###Markdown
**Operations between cpu tensors and cuda tensors**Note that the type of the tensor changed after calling ```.to()```. What happens if we try and perform operations on tensors living on different devices?
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
# Uncomment the following line and run this cell
# z = x + y
###Output
_____no_output_____
###Markdown
We cannot combine cuda tensors and cpu tensors in this fashion. If we want to compute an operation that combines tensors on different devices, we need to move them first! We can use the `.to()` method as before, or the `.cpu()` and `.cuda()` methods. Note that using `.cuda()` will throw an error if CUDA is not enabled on your machine. Generally in this course, all deep learning is done on the GPU, while other computation (e.g., plotting) is done on the CPU, so sometimes we have to pass things back and forth; you'll see us call these methods regularly.
###Code
x = torch.tensor([0, 1, 2], device=DEVICE)
y = torch.tensor([3, 4, 5], device="cpu")
z = torch.tensor([6, 7, 8], device=DEVICE)
# moving to cpu
x = x.to("cpu") # alternatively, you can use x = x.cpu()
print(x + y)
# moving to gpu
y = y.to(DEVICE) # alternatively, you can use y = y.cuda()
print(y + z)
###Output
_____no_output_____
###Markdown
Coding Exercise 2.4: Just how much faster are GPUs?Below is an incomplete function. Complete it so that it performs the same operations entirely on the specified device (we will time it on both the CPU and the GPU). We will use the helper function `timeFun(f, dim, iterations, device)`.
###Code
dim = 10000
iterations = 1
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda:0"
Returns:
Nothing.
"""
###############################################
## TODO for students: complete the function below, ensuring
## all computation happens on the requested device
raise NotImplementedError("Student exercise: fill in the missing code to create the tensors")
###############################################
x = ...
y = ...
z = ...
x = ...
y = ...
del x
del y
del z
## TODO: Implement the function above and uncomment the following lines to test your code
# timeFun(f=simpleFun, dim=dim, iterations=iterations)
# timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
# to_remove solution
def simpleFun(dim, device):
"""
Args:
dim: integer
device: "cpu" or "cuda"
Returns:
Nothing.
"""
x = torch.rand(dim, dim).to(device)
y = torch.rand_like(x).to(device)
z = 2*torch.ones(dim, dim).to(device)
x = x * y
x = x @ z
del x
del y
del z
## TODO: Implement the function above and uncomment the following lines to test your code
timeFun(f=simpleFun, dim=dim, iterations=iterations)
timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)
###Output
_____no_output_____
###Markdown
Sample output (depends on your hardware)
```
time taken for 1 iterations of simpleFun(10000): 28.50481
time taken for 1 iterations of simpleFunGPU(10000): 0.91102
```
**Discuss!**Try reducing the dimensions of the tensors and increasing the iterations. You can get to a point where the CPU-only function is faster than the GPU function. Why might this be? Section 2.5: Datasets and Dataloaders
###Code
# @title Video 7: Getting Data
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1744y127SQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"LSkjPM1gFu0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 7: Getting Data')
display(out)
###Output
_____no_output_____
###Markdown
When training neural network models you will be working with large amounts of data. Fortunately, PyTorch offers some great tools that help you organize and manipulate your data samples.
###Code
# Import dataset and dataloaders related packages
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Grayscale
###Output
_____no_output_____
###Markdown
**Datasets**The `torchvision` package gives you easy access to many of the publicly available datasets. Let's load the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which contains color images of 10 different classes, like vehicles and animals.Creating an object of type `datasets.CIFAR10` will automatically download and load all images from the dataset. The resulting data structure can be treated as a list containing data samples and their corresponding labels.
###Code
# Download and load the images from the CIFAR10 dataset
cifar10_data = datasets.CIFAR10(
root="data", # path where the images will be stored
download=True, # all images should be downloaded
transform=ToTensor() # transform the images to tensors
)
# Print the number of samples in the loaded dataset
print(f"Number of samples: {len(cifar10_data)}")
print(f"Class names: {cifar10_data.classes}")
###Output
_____no_output_____
###Markdown
We have 50000 samples loaded. Now let's take a look at one of them in detail. Each sample consists of an image and its corresponding label.
###Code
# Choose a random sample
random.seed(2021)
image, label = cifar10_data[random.randint(0, len(cifar10_data) - 1)]
print(f"Label: {cifar10_data.classes[label]}")
print(f"Image size: {image.shape}")
###Output
_____no_output_____
###Markdown
Color images are modeled as 3 dimensional tensors. The first dimension corresponds to the channels (C) of the image (in this case we have RGB images). The second dimension is the height (H) of the image and the third is the width (W). We can denote this image format as C × H × W. Coding Exercise 2.5: Display an image from the datasetLet's try to display the image using `matplotlib`. The code below will not work, because `imshow` expects to have the image in a different format - $H \times W \times C$. You need to reorder the dimensions of the tensor using the `permute` method of the tensor. PyTorch `torch.permute(*dims)` rearranges the original tensor according to the desired ordering and returns a new tensor with its dimensions reordered. The size of the returned tensor remains the same as that of the original.**Code hint:**
```python
# create a tensor of size 2 x 4
input_var = torch.randn(2, 4)
# print its size and the tensor
print(input_var.size())
print(input_var)
# dimensions permuted
input_var = input_var.permute(1, 0)
# print its size and the permuted tensor
print(input_var.size())
print(input_var)
```
###Code
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
# plt.imshow(image.permute(...))
# plt.show()
# to_remove solution
# TODO: Uncomment the following line to see the error that arises from the current image format
# plt.imshow(image)
# TODO: Comment the above line and fix this code by reordering the tensor dimensions
plt.imshow(image.permute(1, 2, 0))
plt.show()
#@title Video 8: Train and Test
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1rV411H7s5", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JokSIuPs-ys", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 8: Train and Test')
display(out)
###Output
_____no_output_____
###Markdown
**Training and Test Datasets**When loading a dataset, you can specify if you want to load the training or the test samples using the `train` argument. We can load the training and test datasets separately. For simplicity, today we will not use the two datasets separately; this topic will be addressed in the coming days.
###Code
# Load the training samples
training_data = datasets.CIFAR10(
root="data",
train=True,
download=True,
transform=ToTensor()
)
# Load the test samples
test_data = datasets.CIFAR10(
root="data",
train=False,
download=True,
transform=ToTensor()
)
# @title Video 9: Data Augmentation - Transformations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV19B4y1N77t", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"sjegA9OBUPw", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 9: Data Augmentation - Transformations')
display(out)
###Output
_____no_output_____
###Markdown
**DataLoader**Another important concept is the `DataLoader`. It is a wrapper around the `Dataset` that splits it into minibatches (important for training the neural network) and makes the data iterable. The `shuffle` argument is used to shuffle the order of the samples across the minibatches.
###Code
# Create dataloaders with batch size 64 and shuffling enabled
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
*Reproducibility:* DataLoader will reseed workers following the "Randomness in multi-process data loading" algorithm. Use `worker_init_fn()` and a `generator` to preserve reproducibility:
```python
def seed_worker(worker_id):
    worker_seed = torch.initial_seed() % 2**32
    numpy.random.seed(worker_seed)
    random.seed(worker_seed)

g_seed = torch.Generator()
g_seed.manual_seed(my_seed)

DataLoader(
    train_dataset,
    batch_size=batch_size,
    num_workers=num_workers,
    worker_init_fn=seed_worker,
    generator=g_seed
)
```
**Note:** For the `seed_worker` to have an effect, `num_workers` should be 2 or more. We can now query the next batch from the data loader and inspect it. For this we need to convert the dataloader object to a Python iterator using the function `iter` and then we can query the next batch using the function `next`. We can now see that we have a 4D tensor. This is because we have 64 images in the batch ($B$) and each image has 3 dimensions: channels ($C$), height ($H$) and width ($W$). So, the size of the 4D tensor is $B \times C \times H \times W$.
###Code
# Load the next batch
batch_images, batch_labels = next(iter(train_dataloader))
print('Batch size:', batch_images.shape)
# Display the first image from the batch
plt.imshow(batch_images[0].permute(1, 2, 0))
plt.show()
###Output
_____no_output_____
###Markdown
**Transformations**Another useful feature when loading a dataset is applying transformations on the data - color conversions, normalization, cropping, rotation etc. There are many predefined transformations in the `torchvision.transforms` package and you can also combine them using the `Compose` transform. Check out the [pytorch documentation](https://pytorch.org/vision/stable/transforms.html) for details. Coding Exercise 2.6: Load the CIFAR10 dataset as grayscale imagesThe goal of this exercise is to load the images from the CIFAR10 dataset as grayscale images. Note that we rerun the `set_seed` function to ensure reproducibility.
###Code
def my_data_load():
###############################################
## TODO for students: load the CIFAR10 data using a transform
## that converts the images to grayscale tensors
raise NotImplementedError("Student exercise: fill in the missing code to load the data")
###############################################
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(...,
transform=...)
# Display a random grayscale image
image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
# data = my_data_load()
# to_remove solution
def my_data_load():
## TODO Load the CIFAR10 data using a transform that converts the images to grayscale tensors
data = datasets.CIFAR10(root="data", download=True,
transform=Compose([ToTensor(), Grayscale()]))
# Display a random grayscale image
image, label = data[random.randint(0, len(data) - 1)]
plt.imshow(image.squeeze(), cmap="gray")
plt.show()
return data
set_seed(seed=2021)
## After implementing the above code, uncomment the following lines to test your code
data = my_data_load()
###Output
_____no_output_____
###Markdown
--- Section 3: Neural NetworksNow it's time for you to create your first neural network using PyTorch. This section will walk you through the process of:- Creating a simple neural network model- Training the network- Visualizing the results of the network- Tweaking the network
###Code
# @title Video 10: CSV Files
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1xy4y1T7kv", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JrC_UAJWYKU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 10: CSV Files')
display(out)
###Output
_____no_output_____
###Markdown
Section 3.1: Data LoadingFirst we need some sample data to train our network on. You can use the function below to generate an example dataset consisting of 2D points along two interleaving half circles. The data will be stored in a file called `sample_data.csv`. You can inspect the file directly in Colab by going to Files on the left side and opening the CSV file.
###Code
# @title Generate sample data
# @markdown we used `scikit-learn` module
from sklearn.datasets import make_moons
# Create a dataset of 256 points with a little noise
X, y = make_moons(256, noise=0.1)
# Store the data as a Pandas data frame and save it to a CSV file
df = pd.DataFrame(dict(x0=X[:,0], x1=X[:,1], y=y))
df.to_csv('sample_data.csv')
###Output
_____no_output_____
###Markdown
Now we can load the data from the CSV file using the Pandas library. Pandas provides many functions for reading files in various formats. When loading data from a CSV file, we can reference the columns directly by their names.
###Code
# Load the data from the CSV file in a Pandas DataFrame
data = pd.read_csv("sample_data.csv")
# Create a 2D numpy array from the x0 and x1 columns
X_orig = data[["x0", "x1"]].to_numpy()
# Create a 1D numpy array from the y column
y_orig = data["y"].to_numpy()
# Print the sizes of the generated 2D points X and the corresponding labels Y
print(f"Size X:{X_orig.shape}")
print(f"Size y:{y_orig.shape}")
# Visualize the dataset. The color of the points is determined by the labels `y_orig`.
plt.scatter(X_orig[:, 0], X_orig[:, 1], s=40, c=y_orig)
plt.show()
###Output
_____no_output_____
###Markdown
**Prepare Data for PyTorch**Now let's prepare the data in a format suitable for PyTorch - convert everything into tensors.
###Code
# Initialize the device variable
DEVICE = set_device()
# Convert the 2D points to a float32 tensor
X = torch.tensor(X_orig, dtype=torch.float32)
# Upload the tensor to the device
X = X.to(DEVICE)
print(f"Size X:{X.shape}")
# Convert the labels to a long integer tensor
y = torch.from_numpy(y_orig).type(torch.LongTensor)
# Upload the tensor to the device
y = y.to(DEVICE)
print(f"Size y:{y.shape}")
###Output
_____no_output_____
###Markdown
Section 3.2: Create a Simple Neural Network
###Code
# @title Video 11: Generating the Neural Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1fK4y1M74a", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"PwSzRohUvck", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 11: Generating the Neural Network')
display(out)
###Output
_____no_output_____
###Markdown
For this example we want to have a simple neural network consisting of 3 layers:- 1 input layer of size 2 (our points have 2 coordinates)- 1 hidden layer of size 16 (you can play with different numbers here)- 1 output layer of size 2 (we want to have the scores for the two classes)During the course you will deal with different kinds of neural networks. On Day 2 we will focus on linear networks, but you will work with some more complicated architectures in the next days. The example here is meant to demonstrate the process of creating and training a neural network end-to-end.**Programming the Network**PyTorch provides a base class for all neural network modules called [`nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). You need to inherit from `nn.Module` and implement some important methods:`__init__`In the `__init__` method you need to define the structure of your network. Here you will specify what layers the network will consist of, what activation functions will be used, etc.`forward`All neural network modules need to implement the `forward` method. It specifies the computations the network needs to do when data is passed through it.`predict`This is not an obligatory method of a neural network module, but it is good practice if you want to quickly get the most likely label from the network. It calls the `forward` method and chooses the label with the highest score.`train`This is also not an obligatory method, but it is good practice to have. The method will be used to train the network parameters and will be implemented later in the notebook.> Note that you can use the `__call__` method of a module directly and it will invoke the `forward` method: `net()` does the same as `net.forward()`.
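As a quick illustration of that last point (a minimal sketch; `Tiny` is just a throwaway one-layer module for demonstration):
```python
import torch
import torch.nn as nn

class Tiny(nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(2, 1)

    def forward(self, x):
        return self.layer(x)

net = Tiny()
x = torch.randn(4, 2)
# calling the module invokes forward (plus any registered hooks)
print(torch.equal(net(x), net.forward(x)))  # True
```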
###Code
# Inherit from nn.Module - the base class for neural network modules provided by Pytorch
class NaiveNet(nn.Module):
# Define the structure of your network
def __init__(self):
super(NaiveNet, self).__init__()
# The network is defined as a sequence of operations
self.layers = nn.Sequential(
nn.Linear(2, 16), # Transformation from the input to the hidden layer
nn.ReLU(), # Activation function (ReLU) is a non-linearity which is widely used because it reduces computation. The function returns 0 if it receives any
# negative input, but for any positive value x, it returns that value back.
nn.Linear(16, 2), # Transformation from the hidden to the output layer
)
# Specify the computations performed on the data
def forward(self, x):
# Pass the data through the layers
return self.layers(x)
# Choose the most likely label predicted by the network
def predict(self, x):
# Pass the data through the networks
output = self.forward(x)
# Choose the label with the highest score
return torch.argmax(output, 1)
# Train the neural network (will be implemented later)
def train(self, X, y):
pass
###Output
_____no_output_____
###Markdown
**Check that your network works**Create an instance of your model and visualize it
###Code
# Create new NaiveNet and transfer it to the device
model = NaiveNet().to(DEVICE)
# Print the structure of the network
print(model)
###Output
_____no_output_____
###Markdown
Coding Exercise 3.2: Classify some samplesNow let's pass some of the points of our dataset through the network and see if it works. You should not expect the network to actually classify the points correctly, because it has not been trained yet. The goal here is just to get some experience with the data structures that are passed to the forward and predict methods and their results.
###Code
## Get the samples
# X_samples = ...
# print("Sample input:\n", X_samples)
## Do a forward pass of the network
# output = ...
# print("\nNetwork output:\n", output)
## Predict the label of each point
# y_predicted = ...
# print("\nPredicted labels:\n", y_predicted)
# to_remove solution
## Get the samples
X_samples = X[0:5]
print("Sample input:\n", X_samples)
# Do a forward pass of the network
output = model.forward(X_samples)
print("\nNetwork output:\n", output)
# Predict the label of each point
y_predicted = model.predict(X_samples)
print("\nPredicted labels:\n", y_predicted)
###Output
_____no_output_____
###Markdown
```
Sample input:
 tensor([[ 0.9066,  0.5052],
        [-0.2024,  1.1226],
        [ 1.0685,  0.2809],
        [ 0.6720,  0.5097],
        [ 0.8548,  0.5122]], device='cuda:0')

Network output:
 tensor([[ 0.1543, -0.8018],
        [ 2.2077, -2.9859],
        [-0.5745, -0.0195],
        [ 0.1924, -0.8367],
        [ 0.1818, -0.8301]], device='cuda:0', grad_fn=)

Predicted labels:
 tensor([0, 0, 1, 0, 0], device='cuda:0')
```
Section 3.3: Train Your Neural Network
###Code
# @title Video 12: Train the Network
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1v54y1n7CS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"4MIqnE4XPaA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 12: Train the Network')
display(out)
###Output
_____no_output_____
###Markdown
Now it is time to train your network on your dataset. Don't worry if you don't fully understand everything yet - we will cover training in much more detail in the coming days. For now, the goal is just to see your network in action!You will usually implement the `train` method directly when implementing your class `NaiveNet`. Here, we will implement it as a function outside of the class in order to have it in a separate cell.
###Code
# @title Helper function to plot the decision boundary
# Code adapted from this notebook: https://jonchar.net/notebooks/Artificial-Neural-Network-with-Keras/
from pathlib import Path
def plot_decision_boundary(model, X, y, device):
# Transfer the data to the CPU
X = X.cpu().numpy()
y = y.cpu().numpy()
# Check if the frames folder exists and create it if needed
frames_path = Path("frames")
if not frames_path.exists():
frames_path.mkdir()
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
grid_points = np.c_[xx.ravel(), yy.ravel()]
grid_points = torch.from_numpy(grid_points).type(torch.FloatTensor)
Z = model.predict(grid_points.to(device)).cpu().numpy()
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.binary)
# Implement the train function given a training dataset X and corresponding labels y
def train(model, X, y):
# The Cross Entropy Loss is suitable for classification problems
loss_function = nn.CrossEntropyLoss()
# Create an optimizer (Stochastic Gradient Descent) that will be used to train the network
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of epochs
epochs = 15000
# List of losses for visualization
losses = []
for i in range(epochs):
# Pass the data through the network and compute the loss
# We'll use the whole dataset during the training instead of using batches
# in order to keep the code simple for now.
y_logits = model.forward(X)
loss = loss_function(y_logits, y)
# Clear the previous gradients and compute the new ones
optimizer.zero_grad()
loss.backward()
# Adapt the weights of the network
optimizer.step()
# Store the loss
losses.append(loss.item())
# Print the results at every 1000th epoch
if i % 1000 == 0:
print(f"Epoch {i} loss is {loss.item()}")
plot_decision_boundary(model, X, y, DEVICE)
plt.savefig('frames/{:05d}.png'.format(i))
return losses
# Create a new network instance and train it
model = NaiveNet().to(DEVICE)
losses = train(model, X, y)
###Output
_____no_output_____
###Markdown
**Plot the loss during training**Plot the loss during the training to see how it reduces and converges.
###Code
plt.plot(np.linspace(1, len(losses), len(losses)), losses)
plt.xlabel("Epoch")
plt.ylabel("Loss")
# @title Visualize the training process
# @markdown ### Execute this cell!
!pip install imageio --quiet
!pip install pathlib --quiet
import imageio
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import Image, display
from pathlib import Path
InteractiveShell.ast_node_interactivity = "all"
# Make a list with all images
images = []
for i in range(15):
filename = "frames/{:05d}.png".format(i * 1000)
images.append(imageio.imread(filename))
# Save the gif
imageio.mimsave('frames/movie.gif', images)
gifPath = Path("frames/movie.gif")
with open(gifPath,'rb') as f:
display(Image(data=f.read(), format='png'))
# @title Video 13: Play with it
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Cq4y1W7BH", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"_GGkapdOdSY", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 13: Play with it')
display(out)
###Output
_____no_output_____
###Markdown
Exercise 3.3: Tweak your NetworkYou can now play around with the network a little bit to get a feeling of what different parameters are doing. Here are some ideas for what you could try:- Increase or decrease the number of epochs for training- Increase or decrease the size of the hidden layer- Add one additional hidden layerCan you get the network to better fit the data? (A sketch of one possible variation follows below.)
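For example, a variant with one additional hidden layer might look like this (a minimal sketch; `DeeperNet` and the layer sizes are illustrative, and the commented-out lines reuse `DEVICE`, `X`, `y`, and `train` from the cells above):
```python
import torch
import torch.nn as nn

class DeeperNet(nn.Module):
    def __init__(self):
        super(DeeperNet, self).__init__()
        self.layers = nn.Sequential(
            nn.Linear(2, 16),
            nn.ReLU(),
            nn.Linear(16, 16),  # the additional hidden layer
            nn.ReLU(),
            nn.Linear(16, 2),
        )

    def forward(self, x):
        return self.layers(x)

    def predict(self, x):
        return torch.argmax(self.forward(x), 1)

# model = DeeperNet().to(DEVICE)
# losses = train(model, X, y)
```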
###Code
# @title Video 14: XOR Widget
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1mB4y1N7QS", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"oTr1nE2rCWg", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add timing to airtable
atform.add_event('Video 14: XOR Widget')
display(out)
###Output
_____no_output_____
###Markdown
The Exclusive OR (XOR) logical operation gives a true (`1`) output when the number of true inputs is odd. That is, a true output results if one, and only one, of the inputs to the gate is true. If both inputs are false (`0`) or both are true (`1`), a false output results. Mathematically speaking, XOR represents the inequality function, i.e., the output is true if the inputs are not alike; otherwise, the output is false.In the case of two inputs ($X$ and $Y$) the following truth table applies:\begin{array}{ccc}X & Y & \text{XOR} \\\hline0 & 0 & 0 \\0 & 1 & 1 \\1 & 0 & 1 \\1 & 1 & 0 \\\end{array}Here, with `0`, we denote `False`, and with `1` we denote `True` in boolean terms. Interactive Demo 3.3: Solving XORHere we use a famous open-source visualization widget developed by the Tensorflow team, available [here](https://github.com/tensorflow/playground).* Play with the widget and observe that you cannot solve the continuous XOR dataset.* Now add one hidden layer with three units, play with the widget, and set weights by hand to solve this dataset perfectly.For the second part, you should set the weights by clicking on the connections and either typing the value or using the up and down keys to change it by one increment. You could also do the same for the biases by clicking on the tiny square at each neuron's bottom left.Even though there are infinitely many solutions, a neat solution when $f(x)$ is ReLU is: \begin{equation} y = f(x_1)+f(x_2)-f(x_1+x_2)\end{equation}Try to set the weights and biases to implement this function after you have played enough :) (A quick numeric check of this solution appears below.)
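A quick numeric check (not part of the original widget) of the hand-crafted ReLU solution above, assuming the ±1 input encoding of the continuous XOR dataset (same sign means class 0, different signs class 1):

```python
import numpy as np

def relu(x):
    return np.maximum(x, 0.0)

for x1 in (-1.0, 1.0):
    for x2 in (-1.0, 1.0):
        y = relu(x1) + relu(x2) - relu(x1 + x2)
        print(f"x1={x1:+.0f}, x2={x2:+.0f} -> y={y:.0f}")
# Prints y=0 when the inputs share a sign and y=1 otherwise, i.e. XOR.
```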
###Code
# @markdown ###Play with the parameters to solve XOR
from IPython.display import HTML
HTML('<iframe width="1020" height="660" src="https://playground.arashash.com/#activation=relu&batchSize=10&dataset=xor&regDataset=reg-plane&learningRate=0.03&regularizationRate=0&noise=0&networkShape=&seed=0.91390&showTestData=false&discretize=false&percTrainData=90&x=true&y=true&xTimesY=false&xSquared=false&ySquared=false&cosX=false&sinX=false&cosY=false&sinY=false&collectStats=false&problem=classification&initZero=false&hideText=false" allowfullscreen></iframe>')
# @markdown Do you think we can solve the discrete XOR (only 4 possibilities) with only 2 hidden units?
w1_min_xor = 'Select' #@param ['Select', 'Yes', 'No']
if w1_min_xor == 'No':
print("Correct!")
else:
print("How about giving it another try?")
###Output
_____no_output_____
###Markdown
--- Section 4: Ethics And Course Info
###Code
# @title Video 15: Ethics
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1Hw41197oB", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Kt6JLi3rUFU", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 16: Be a group
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1j44y1272h", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"Sfp6--d_H1A", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Video 17: Syllabus
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1iB4y1N7uQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"cDvAqG_hAvQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
Meet our lecturers:Week 1: the building blocks* [Konrad Kording](https://kordinglab.com)* [Andrew Saxe](https://www.saxelab.org/)* [Surya Ganguli](https://ganguli-gang.stanford.edu/)* [Ioannis Mitliagkas](http://mitliagkas.github.io/)* [Lyle Ungar](https://www.cis.upenn.edu/~ungar/)Week 2: making things work* [Alona Fyshe](https://webdocs.cs.ualberta.ca/~alona/)* [Alexander Ecker](https://eckerlab.org/)* [James Evans](https://sociology.uchicago.edu/directory/james-evans)* [He He](https://hhexiy.github.io/)* [Vikash Gilja](https://tnel.ucsd.edu/bio) and [Akash Srivastava](https://akashgit.github.io/)Week 3: more magic* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)* [Jane Wang](http://www.janexwang.com/) and [Feryal Behbahani](https://feryal.github.io/)* [Tim Lillicrap](https://contrastiveconvergence.net/~timothylillicrap/index.php) and [Blake Richards](https://www.mcgill.ca/neuro/blake-richards-phd)* [Josh Vogelstein](https://jovo.me/) and [Vincenzo Lomonaco](https://www.vincenzolomonaco.com/)Now, go to the [visualization of ICLR papers](https://iclr.cc/virtual/2021/paper_vis.html). Read a few abstracts. Look at the various clusters. Where do you see yourself in this map? --- Submit to Airtable
###Code
# @title Video 18: Submission info
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1e44y127ti", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"JwTn7ej2dq8", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
###Output
_____no_output_____
###Markdown
This is Darryl, the Deep Learning Dapper Lion, and he's here to teach you about content submission to airtable. At the end of each tutorial there will be an Airtable Submission Cell. Run the cell to generate the airtable submission button and click on it to submit your information to airtable. If it is the last tutorial of the day, your button will look like this and take you to the end-of-day survey: otherwise it will look like this: It is critical that you push the submit button for every tutorial you run. Even if you don't finish the tutorial, still submit!Submitting is the only way we can verify that you attempted each tutorial, which is critical for the award of your completion certificate at the end of the course.Finally, we try to keep the airtable code as hidden as possible, but if you ever see any calls to `atform` such as `atform.add_event()` in the coding exercises, just know that is for saving airtable information only. It will not affect the code that is being run around it in any way, so please do not modify, comment out, or worry about any of those lines of code.Now, let's try submitting today's course to Airtable by running the next cell and clicking the button when it appears.
###Code
# @title Airtable Submission Link
from IPython import display
display.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link to survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
###Markdown
--- Bonus - 60 years of Machine Learning Research in one Plotby [Hendrik Strobelt](http://hendrik.strobelt.com) (MIT-IBM Watson AI Lab) with support from Benjamin Hoover.In this notebook we visualize a subset* of 3,300 articles retrieved from the AllenAI [S2ORC dataset](https://github.com/allenai/s2orc). We represent each paper by a position that is the output of a dimensionality reduction method applied to a vector representation of each paper. The vector representation is the output of a neural network.*The selection is heavily biased by the keywords and methodology we used to filter. Please see the details section to learn about what we did. (A schematic sketch of the embedding-and-projection idea follows below.)
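A hedged sketch (not the actual pipeline) of the idea described above: each paper gets a neural vector representation, and a dimensionality reduction method turns those vectors into 2-D plot positions. The embedding dimension and library choices here are illustrative assumptions; the Methods section below names the real ones (SPECTER embeddings projected with UMAP).

```python
import numpy as np
import umap  # pip install umap-learn

embeddings = np.random.randn(3300, 768)  # stand-in for neural paper embeddings
positions = umap.UMAP(metric='cosine').fit_transform(embeddings)  # (3300, 2) x, y
```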
###Code
# @title Import `altair` and load the data
!pip install altair vega_datasets --quiet
import requests
import numpy as np
import pandas as pd
import altair as alt  # altair defines the data visualizations
# Source data files
# Position data file maps ID to x,y positions
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc.pos_umap_cosine_100_d0.1.json
POS_FILE = 'https://osf.io/qyrfn/download'
# original link: http://gltr.io/temp/ml_regexv1_cs_ma_citation+_99perc_clean.csv
# Metadata file maps ID to title, abstract, author,....
META_FILE = 'https://osf.io/vfdu6/download'
# data loading and wrangling
def load_data():
positions = pd.read_json(POS_FILE)
positions[['x', 'y']] = positions['pos'].to_list()
meta = pd.read_csv(META_FILE)
return positions.merge(meta, left_on='id', right_on='paper_id')
# load data
data = load_data()
# @title Define Visualization using Altair
YEAR_PERIOD = "quinquennial" # @param
selection = alt.selection_multi(fields=[YEAR_PERIOD], bind='legend')
data[YEAR_PERIOD] = (data["year"] / 5.0).apply(np.floor) * 5
chart = alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count"]], width=800,
height=800).mark_circle(radius=2, opacity=0.2).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False, clamp=True, domain=list(range(1955,2020,5))),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
# size='citation_count',
# color="decade:O",
opacity=alt.condition(selection, alt.value(.8), alt.value(0.2)),
).add_selection(
selection
).interactive()
###Output
_____no_output_____
###Markdown
Let's look at the visualization. Each dot represents one paper. Close dots mean that the respective papers are more closely related than distant ones. The color indicates the 5-year period in which the paper was published. The dot size indicates the citation count (within the S2ORC corpus) as of July 2020. The view is **interactive** and allows for three main interactions. Try them and play around.1. hover over a dot to see a tooltip (title, author)2. select a year in the legend (right) to filter dots3. zoom in/out with scroll -- double click resets view
###Code
chart
###Output
_____no_output_____
###Markdown
QuestionsBy playing around, can you find some answers to the following questions?1. Can you find topical clusters? What cluster might occur because of a filtering error?2. Can you see a temporal trend in the data and clusters?3. Can you determine when deep learning methods started booming?4. Can you find the key papers that were written before the DL "winter" that define milestones for a cluster? (tip: look for large dots of a different color) MethodsHere is what we did:1. Filtering of all papers that fulfilled the criteria: - are categorized as `Computer Science` or `Mathematics` - one of the following keywords appearing in title or abstract: `"machine learning|artificial intelligence|neural network|(machine|computer) vision|perceptron|network architecture| RNN | CNN | LSTM | BLEU | MNIST | CIFAR |reinforcement learning|gradient descent| Imagenet "`2. per year, remove all papers that are below the 99th percentile of citation count in that year3. embed each paper by using abstract+title in the SPECTER model4. project based on the embedding using UMAP5. visualize using Altair Find Authors
###Code
# @title Edit the `AUTHOR_FILTER` variable to full text search for authors.
AUTHOR_FILTER = "Rush " # @param space at the end means "word border"
### Don't ignore case when searching...
FLAGS = 0
### uncomment to ignore case
# FLAGS = re.IGNORECASE
## --- FILTER CODE.. make it your own ---
import re
data['issel'] = data['authors'].str.contains(AUTHOR_FILTER, na=False, flags=FLAGS, )
if data['issel'].mean()<0.0000000001:
print('No match found')
## --- FROM HERE ON VIS CODE ---
alt.Chart(data[["x", "y", "authors", "title", YEAR_PERIOD, "citation_count", "issel"]], width=800,
height=800) \
.mark_circle(stroke="black", strokeOpacity=1).encode(
alt.Color(YEAR_PERIOD+':O',
scale=alt.Scale(scheme='viridis', reverse=False),
# legend=alt.Legend(title='Total Records')
),
alt.Size('citation_count',
scale=alt.Scale(type="pow", exponent=1, range=[15, 300])
),
alt.StrokeWidth('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[0, 2]), legend=None),
alt.Opacity('issel:Q', scale=alt.Scale(type="linear", domain=[0,1], range=[.2, 1]), legend=None),
alt.X('x:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
alt.Y('y:Q',
scale=alt.Scale(zero=False), axis=alt.Axis(labels=False)
),
tooltip=['title', 'authors'],
).interactive()
###Output
_____no_output_____
###Markdown
This is Darryl, the Deep Learning Dapper Lion, and he's here to teach you about content submission to airtable. At the end of each tutorial there will be an Airtable Submission Cell. Run the cell to generate the airtable submission button and click on it to submit your information to airtable. If it is the last tutorial of the day, your button will look like this and take you to the end-of-day survey: otherwise it will look like this: It is critical that you push the submit button for every tutorial you run. Even if you don't finish the tutorial, still submit!Submitting is the only way we can verify that you attempted each tutorial, which is critical for us to be able to track your progress. TL;DR: Basic tutorial workflow1. work through the tutorial, answering Think! questions and code exercises2. at the end of each tutorial (even if the tutorial is incomplete), run the airtable submission code cell3. push the submission button4. if it is the last tutorial of the day, the submission button will also take you to the end-of-day survey on a new page; complete that and submit it. Submission FAQs: 1. What if I want to change my answers to previous discussion questions? > You are free to change and resubmit any of the answers and Think! questions as many times as you like. However, please only run the airtable submission code and click on the link once you are ready to submit.2. Okay, but what if I submitted my airtable anyway and really want to resubmit?> After making changes, you can re-run the airtable submission code cell. This will result in a second submission from you for the data. This will make Darryl sad, as it will be more work for him to clean up the data later. 3. HELP! I accidentally ran the code to generate the airtable submission button before I was ready to submit! What do I do?> If you run the code to generate the link, anything that happens afterwards will not be captured. Complete the tutorial and make sure to re-run the airtable submission again when you are finished, before pressing the submission button. 4. What if I want to work on this on my own later; should I wait to submit until I'm finished?> Please submit wherever you are at the end of the day. It's great that you want to keep working on this, but it's important for us to see the places where we tried things that didn't quite work out, so we can fix them for next year. Finally, we try to keep the airtable code as hidden as possible, but if you ever see any calls to `atform` such as `atform.add_event()` in the coding exercises, just know that is for saving airtable information only. It will not affect the code that is being run around it in any way, so please do not modify, comment out, or worry about any of those lines of code.Now, let's try submitting today's course to Airtable by running the next cell and clicking the button when it appears.
###Code
# @title Airtable Submission Link
from IPython import display
display.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/SurveyButton.png?raw=1"
alt="button link to survey" style="width:410px"></a>
</div>""" )
###Output
_____no_output_____
MachineLearning_A-Z/Part01-DataPreprocessing/P1S2_Data-Preprocessing.ipynb | ###Markdown
Section 2In this section we will create a data preprocessing template.--------------------- Lecture 8 - Get the datasetArrange all the data, for example, in a CSV file. Here we will use the [Data.csv](https://github.com/piLinux/StudyRoom/blob/master/MachineLearning_A-Z/Part01-DataPreprocessing/Data.csv) file.------------------------------- Lecture 9 - Importing the libraries
###Code
"""numpy:
An array object of arbitrary homogeneous items
Fast mathematical operations over arrays
Linear Algebra, Fourier Transforms, Random Number Generation"""
import numpy as np
import matplotlib.pyplot as plt # drawing plots
import pandas as pd # import and manage datasets
###Output
_____no_output_____
###Markdown
------------- Data.csv- It has 4 columns and 10 rows.- Index no of __Country__ column = 0- Index no of __Age__ column = 1- Index no of __Salary__ column = 2- Index no of __Purchased__ column = 3- Index of _ROW_ starts from _0_ as well (in this file we have rows 0-9). X: columns 0, 1, 2 are independent. y: column 3 is dependent. Missing values: r4c2 and r6c1----------- Lecture 10 - Importing the dataset
###Code
dataset = pd.read_csv('Data.csv')
# all rows, all columns except the last column
X = dataset.iloc[:, :-1].values
# all rows, and only the 4th column (column with index 3)
y = dataset.iloc[:, 3].values
###Output
_____no_output_____
###Markdown
Output:-------------- iloc Vs loc_iloc_: position based | _loc_: label based
###Code
s = pd.Series(np.nan, index=[49,48,47,46,45, 1, 2, 3, 4, 5])
s
s.iloc[:3] # from row 0 to 2
s.loc[:3] # till and including label 3
###Output
_____no_output_____
###Markdown
------------- Lecture 12: Taking care of missing dataIn our dataset, we have two missing values. So,- we either can delete those rows- or, we can populate those two missing values with the mean, median or modeHere we are going to use the _mean_ values.
###Code
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values="NaN", strategy="mean", axis=0)
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
###Output
_____no_output_____
###Markdown
Imputer__Definition:__ Imputer(self, missing_values="NaN", strategy="mean", axis=0, verbose=0, copy=True)__Type:__ Class in sklearn.preprocessing.imputation module__missing_values:__ integer or “NaN”, optional (default=”NaN”)- The placeholder for the missing values.- All occurrences of missing_values will be imputed.- For missing values encoded as np.nan, use the string value “NaN”.__strategy:__ string, optional (default=”mean”)The imputation strategy.- If “mean”, then replace missing values using the mean along the axis.- If “median”, then replace missing values using the median along the axis.- If “most_frequent”, then replace missing using the most frequent value along the axis.__axis:__ integer, optional (default=0)The axis along which to impute.- If axis=0, then impute along columns.- If axis=1, then impute along rows.
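Note: the `Imputer` class used above was deprecated and later removed from scikit-learn (version 0.22 onward). If the import above fails, a hedged equivalent with the newer API might look like this:

```python
import numpy as np
from sklearn.impute import SimpleImputer

# strategy='mean' replaces each NaN with the column mean, as above
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
X[:, 1:3] = imputer.fit_transform(X[:, 1:3])
```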
###Code
X
###Output
_____no_output_____
###Markdown
-------------------- Lecture 13: Encoding categorical dataWe will encode _countries_:- Since we have three countries, we need 3 columnsBy encoding, we want to achieve the following result for countries:- Column 0 corresponds to France- Column 1 corresponds to Germany- Column 2 corresponds to Spain
###Code
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
onehotencoder = OneHotEncoder(categorical_features=[0])
X = onehotencoder.fit_transform(X).toarray()
X
###Output
_____no_output_____
###Markdown
X (in plot, format %.0f): LabelEncoder- Encode labels with value between 0 and n_classes-1. For our dataset, we need only LabelEncoder to encode y (_yes_ and _no_ into _1_ and _0_). OneHotEncoder__Definition:__ OneHotEncoder(self, n_values="auto", categorical_features="all", dtype=np.float64, sparse=True, handle_unknown='error')- Encode categorical integer features using a one-hot aka one-of-K scheme.- The input to this transformer should be a matrix of integers, denoting the values taken on by categorical (discrete) features. The output will be a sparse matrix where each column corresponds to one possible value of one feature. - It is assumed that input features take on values in the range [0, n_values).__categorical_features:__ “all” or array of indices or mask.Specify what features are treated as categorical.- ‘all’ (default): All features are treated as categorical.- array of indices: Array of categorical feature indices.- mask: Array of length n_features and with dtype=bool.Non-categorical features are always stacked to the right of the matrix.
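Note: in newer scikit-learn versions, the `categorical_features` argument was removed from `OneHotEncoder`. A hedged modern equivalent of the encoding above uses `ColumnTransformer` (the encoded columns are likewise stacked to the left):

```python
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

# One-hot encode column 0 (Country) and pass the remaining columns through
ct = ColumnTransformer([('country', OneHotEncoder(), [0])], remainder='passthrough')
X = ct.fit_transform(X)
```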
###Code
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
y
###Output
_____no_output_____
###Markdown
 ----------- Lecture 14: Splitting the dataset into _training set_ and _test set_
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
X_train
###Output
_____no_output_____
###Markdown

###Code
X_test
###Output
_____no_output_____
###Markdown

###Code
y_train
###Output
_____no_output_____
###Markdown

###Code
y_test
###Output
_____no_output_____
###Markdown
 --------------- Lecture 15: Feature scaling Euclidean distance:
###Code
from IPython.display import Math
Math(r'\text{Euclidean Distance} = \sqrt{(x_{2}-x_{1})^{2}+(y_{2}-y_{1})^{2}}')
###Output
_____no_output_____
###Markdown
In our dataset, _age_ and _salary_ are not on the same scale. _Age_ will be dominated by _salary_. That's why we need to scale the features. Standardisation:
###Code
Math(r'x_{stand} = \frac{x - x_{mean}}{x_{std}}')
###Output
_____no_output_____
###Markdown
Normalisation:
###Code
Math(r'x_{norm} = \frac{x-x_{min}}{x_{max}-x_{min}}')
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
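# Illustrative note (added): the test set is scaled with the TRAINING set's
# mu and sigma, not its own. E.g., if a training column were [1., 2., 3.]
# (mu = 2, sigma ~ 0.816), a test value of 4. would map to (4 - 2) / 0.816 ~ 2.45.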
###Output
_____no_output_____
###Markdown
To center the data (zero mean and unit standard deviation), we subtract the mean and then divide the result by the standard deviation.x′ = (x−μ)/σWe do that on the training set of data. Then we have to apply the same transformation to our testing set (e.g. in model selection), but we have to use the same two parameters μ and σ that we used for centering the training set.Hence, every sklearn transformer's fit() just calculates the parameters (e.g. μ and σ in the case of StandardScaler) and saves them as internal object state. Afterwards, we can call its transform() method to apply the transformation to a particular set of examples.fit_transform() joins these two steps and is used for the initial fitting of parameters on the training set X, but it also returns a transformed X′. Internally, it simply calls fit() and then transform() on the same data. For _categorical data_, whether or not to apply _feature scaling_ depends entirely on the situation.If we scale the encoded columns, we lose their intuitive 0/1 interpretation, but everything will be on the same scale.In our example, we did _feature scaling_. __X_train fit_transform:__ __X_test transform:__ We don't need to scale the y dataset. ------------------ Lecture 17: Data preprocessing final template
###Code
# -*- coding: utf-8 -*-
# Part 1 - Section 2: Data Preprocessing
# Importing the Libraries
import numpy as np # mathematics
import matplotlib.pyplot as plt # drawing plots
import pandas as pd # import and manage datasets
# Importing the datasets
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values
# Splitting the dataset into training set and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
### If required, take care of missing data and encode categorical data
###Output
_____no_output_____ |
static/notebook/train-detection-from-json-image-list.ipynb | ###Markdown
Image Object Model (.h5) Generation for Wall (Sample Example) Uses **.json** image list file downloaded from image share/request page in ISAC-SIMO Save the downloaded **.json** file in same directory as the Notebook (Or change the path below)
###Code
import json
OBJECT_TYPE = "wall" # The Object Type to detect (also the JSON file name)
# Replace json path as required
f = open(OBJECT_TYPE+'.json', "r")
walls = json.loads(f.read())
f.close()
###Output
_____no_output_____
###Markdown
Install PrerequisitesWe use Keras/Tensorflow to build the classification model, and visualize the process with matplotlib.
###Code
!pip install tensorflow==2.5.0 ibm-cos-sdk==2.6 h5py==2.10.0
# Import required libraries
import os
import uuid
import shutil
import json
import ibm_boto3
import tensorflow as tf
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Download and Save images
###Code
import urllib.request
import os
os.makedirs('data', exist_ok=True)
os.makedirs('data/'+OBJECT_TYPE, exist_ok=True)
for wall in walls:
urllib.request.urlretrieve(wall.get("url"), "data/"+OBJECT_TYPE+"/"+os.path.split(wall.get("url"))[1])
print(OBJECT_TYPE)
!ls 'data/'$OBJECT_TYPE
###Output
_____no_output_____
###Markdown
Build the ModelWe start with a [MobileNetV2](https://arxiv.org/abs/1801.04381) architecture as the backbone [pretrained feature extractor](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet). We then add a couple of dense layers and a softmax layer to perform the classification. We freeze the MobileNetV2 backbone with weights trained on the ImageNet dataset and only train the dense layers and softmax layer that we have added.The configuration needs to change depending on the image size and aspect ratio. Alternatively, you might use another backbone or other libraries for training and exporting the model.
###Code
base_model=tf.keras.applications.MobileNetV2(weights='imagenet', include_top=False, input_shape=(224, 224, 3), alpha=1.0) #imports the mobilenet model and discards the last 1000 neuron layer.
x=base_model.output
x=tf.keras.layers.GlobalAveragePooling2D()(x)
x=tf.keras.layers.Dense(512,activation='relu')(x) #dense layer 1
x=tf.keras.layers.Dense(256,activation='relu')(x) #dense layer 2
preds=tf.keras.layers.Dense(3,activation='softmax')(x) #final layer with softmax activation
model=tf.keras.Model(inputs=base_model.input,outputs=preds)
#Freeze layers from MobileNetV2 backbone (not to be trained)
for layer in base_model.layers:
layer.trainable=False
#Prepare the training dataset as a data generator object
train_datagen=tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input) #included in our dependencies
train_generator=train_datagen.flow_from_directory('data',
target_size=(224,224),
color_mode='rgb',
batch_size=10,
class_mode='categorical',
shuffle=True)
###Output
_____no_output_____
###Markdown
Using Adam, categorical_crossentropy and accuracy as the optimization method, loss function and metric, respectively
###Code
# Build the model
model.compile(optimizer='Adam',loss='categorical_crossentropy',metrics=['accuracy'])
model.summary()
###Output
_____no_output_____
###Markdown
Train the model
###Code
from tensorflow.random import set_seed
set_seed(3)
step_size_train=1
epochs=1
log_file = model.fit(train_generator,
steps_per_epoch=step_size_train,
epochs=epochs)
###Output
_____no_output_____
###Markdown
Figure of Training Loss and AccuracyPlot the training results as below (if required)
###Code
# # Model accuracy and loss vs epoch (with TF 2.x the history key is 'accuracy', not 'acc')
# plt.plot(log_file.history['accuracy'], '-bo', label="train_accuracy")
# plt.plot(log_file.history['loss'], '-r*', label="train_loss")
# plt.title('Training Loss and Accuracy')
# plt.ylabel('Loss/Accuracy')
# plt.xlabel('Epoch #')
# plt.legend(loc='center right')
# plt.show()
###Output
_____no_output_____
###Markdown
Model PerformanceHere we perform inference on some sample data points to determine the performance of the model
###Code
# Mapping labels
label_map = (train_generator.class_indices)
label_map
# Returns Example: {'wall': 0}
# Multiple (especially related) object types can be trained in the same model to make it reusable.
# Creating a sample inference function
def prediction(image_path, model):
img = tf.keras.preprocessing.image.load_img(image_path, target_size=(224, 224))
x = tf.keras.preprocessing.image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = tf.keras.applications.mobilenet_v2.preprocess_input(x)
preds = model.predict(x)
# print('Predictions', preds)
# Printing the prediction score and class
# for pred, value in label_map.items():
# if value == np.argmax(preds):
# print('Predicted class is:', pred)
# print('With a confidence score of: ', np.max(preds))
# Format the Output as required by ISAC-SIMO Offline Model (Object Detect)
# https://www.isac-simo.net/app/offline_model/readme.md#object-detect
if np.argmax(preds) == 0:
return [[np.max(preds)]] # Detected
else:
return [[0]] # Did not Detect
# Download some Sample Images (Example: Wall)
wall_img = 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/d8/Fragment_muru_z_ceg%C5%82y.jpg/1280px-Fragment_muru_z_ceg%C5%82y.jpg'
!wget {wall_img} -O wall.jpg
# Opening test image
image = Image.open("wall.jpg")
image
prediction('wall.jpg', model)
# Returns Example: [[0.69597286]] (or [[0]] if the object is not detected)
# Generate and Download the .h5 Model
from google.colab import files
!mkdir -p saved_model
model.save('saved_model/detect-model-for-'+OBJECT_TYPE+'.h5')
files.download('saved_model/detect-model-for-'+OBJECT_TYPE+'.h5')
###Output
_____no_output_____ |
Neural style transfer/Art_Generation_with_Neural_Style_Transfer_v3a.ipynb | ###Markdown
Deep Learning & Art: Neural Style TransferIn this assignment, you will learn about Neural Style Transfer. This algorithm was created by [Gatys et al. (2015).](https://arxiv.org/abs/1508.06576)**In this assignment, you will:**- Implement the neural style transfer algorithm - Generate novel artistic images using your algorithm Most of the algorithms you've studied optimize a cost function to get a set of parameter values. In Neural Style Transfer, you'll optimize a cost function to get pixel values! Updates If you were working on the notebook before this update...* The current notebook is version "3a".* You can find your original work saved in the notebook with the previous version name ("v2") * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory. List of updates* Use `pprint.PrettyPrinter` to format printing of the vgg model.* computing content cost: clarified and reformatted instructions, fixed broken links, added additional hints for unrolling.* style matrix: clarify two uses of variable "G" by using different notation for gram matrix.* style cost: use distinct notation for gram matrix, added additional hints.* Grammar and wording updates for clarity.* `model_nn`: added hints.
###Code
import os
import sys
import scipy.io
import scipy.misc
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from PIL import Image
from nst_utils import *
import numpy as np
import tensorflow as tf
import pprint
import nst_utils
%matplotlib inline
###Output
_____no_output_____
###Markdown
1 - Problem StatementNeural Style Transfer (NST) is one of the most fun techniques in deep learning. As seen below, it merges two images, namely: a **"content" image (C) and a "style" image (S), to create a "generated" image (G**). The generated image G combines the "content" of the image C with the "style" of image S. In this example, you are going to generate an image of the Louvre museum in Paris (content image C), mixed with a painting by Claude Monet, a leader of the impressionist movement (style image S).Let's see how you can do this. 2 - Transfer LearningNeural Style Transfer (NST) uses a previously trained convolutional network, and builds on top of that. The idea of using a network trained on a different task and applying it to a new task is called transfer learning. Following the [original NST paper](https://arxiv.org/abs/1508.06576), we will use the VGG network. Specifically, we'll use VGG-19, a 19-layer version of the VGG network. This model has already been trained on the very large ImageNet database, and thus has learned to recognize a variety of low level features (at the shallower layers) and high level features (at the deeper layers). Run the following code to load parameters from the VGG model. This may take a few seconds.
###Code
pp = pprint.PrettyPrinter(indent=4)
model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
pp.pprint(model)
###Output
{ 'avgpool1': <tf.Tensor 'AvgPool:0' shape=(1, 150, 200, 64) dtype=float32>,
'avgpool2': <tf.Tensor 'AvgPool_1:0' shape=(1, 75, 100, 128) dtype=float32>,
'avgpool3': <tf.Tensor 'AvgPool_2:0' shape=(1, 38, 50, 256) dtype=float32>,
'avgpool4': <tf.Tensor 'AvgPool_3:0' shape=(1, 19, 25, 512) dtype=float32>,
'avgpool5': <tf.Tensor 'AvgPool_4:0' shape=(1, 10, 13, 512) dtype=float32>,
'conv1_1': <tf.Tensor 'Relu:0' shape=(1, 300, 400, 64) dtype=float32>,
'conv1_2': <tf.Tensor 'Relu_1:0' shape=(1, 300, 400, 64) dtype=float32>,
'conv2_1': <tf.Tensor 'Relu_2:0' shape=(1, 150, 200, 128) dtype=float32>,
'conv2_2': <tf.Tensor 'Relu_3:0' shape=(1, 150, 200, 128) dtype=float32>,
'conv3_1': <tf.Tensor 'Relu_4:0' shape=(1, 75, 100, 256) dtype=float32>,
'conv3_2': <tf.Tensor 'Relu_5:0' shape=(1, 75, 100, 256) dtype=float32>,
'conv3_3': <tf.Tensor 'Relu_6:0' shape=(1, 75, 100, 256) dtype=float32>,
'conv3_4': <tf.Tensor 'Relu_7:0' shape=(1, 75, 100, 256) dtype=float32>,
'conv4_1': <tf.Tensor 'Relu_8:0' shape=(1, 38, 50, 512) dtype=float32>,
'conv4_2': <tf.Tensor 'Relu_9:0' shape=(1, 38, 50, 512) dtype=float32>,
'conv4_3': <tf.Tensor 'Relu_10:0' shape=(1, 38, 50, 512) dtype=float32>,
'conv4_4': <tf.Tensor 'Relu_11:0' shape=(1, 38, 50, 512) dtype=float32>,
'conv5_1': <tf.Tensor 'Relu_12:0' shape=(1, 19, 25, 512) dtype=float32>,
'conv5_2': <tf.Tensor 'Relu_13:0' shape=(1, 19, 25, 512) dtype=float32>,
'conv5_3': <tf.Tensor 'Relu_14:0' shape=(1, 19, 25, 512) dtype=float32>,
'conv5_4': <tf.Tensor 'Relu_15:0' shape=(1, 19, 25, 512) dtype=float32>,
'input': <tf.Variable 'Variable:0' shape=(1, 300, 400, 3) dtype=float32_ref>}
###Markdown
* The model is stored in a python dictionary. * The python dictionary contains key-value pairs for each layer. * The 'key' is the variable name and the 'value' is a tensor for that layer. Assign input image to the model's input layerTo run an image through this network, you just have to feed the image to the model. In TensorFlow, you can do so using the [tf.assign](https://www.tensorflow.org/api_docs/python/tf/assign) function. In particular, you will use the assign function like this: ```pythonmodel["input"].assign(image)```This assigns the image as an input to the model. Activate a layerAfter this, if you want to access the activations of a particular layer, say layer `4_2` when the network is run on this image, you would run a TensorFlow session on the correct tensor `conv4_2`, as follows: ```pythonsess.run(model["conv4_2"])``` 3 - Neural Style Transfer (NST)We will build the Neural Style Transfer (NST) algorithm in three steps:- Build the content cost function $J_{content}(C,G)$- Build the style cost function $J_{style}(S,G)$- Put it together to get $J(G) = \alpha J_{content}(C,G) + \beta J_{style}(S,G)$. 3.1 - Computing the content costIn our running example, the content image C will be the picture of the Louvre Museum in Paris. Run the code below to see a picture of the Louvre.
###Code
content_image = scipy.misc.imread("images/louvre.jpg")
imshow(content_image);
###Output
_____no_output_____
###Markdown
The content image (C) shows the Louvre museum's pyramid surrounded by old Paris buildings, against a sunny sky with a few clouds.** 3.1.1 - Make generated image G match the content of image C** Shallower versus deeper layers* The shallower layers of a ConvNet tend to detect lower-level features such as edges and simple textures.* The deeper layers tend to detect higher-level features such as more complex textures as well as object classes. Choose a "middle" activation layer $a^{[l]}$We would like the "generated" image G to have similar content as the input image C. Suppose you have chosen some layer's activations to represent the content of an image. * In practice, you'll get the most visually pleasing results if you choose a layer in the **middle** of the network--neither too shallow nor too deep. * (After you have finished this exercise, feel free to come back and experiment with using different layers, to see how the results vary.) Forward propagate image "C"* Set the image C as the input to the pretrained VGG network, and run forward propagation. * Let $a^{(C)}$ be the hidden layer activations in the layer you had chosen. (In lecture, we had written this as $a^{[l](C)}$, but here we'll drop the superscript $[l]$ to simplify the notation.) This will be an $n_H \times n_W \times n_C$ tensor. Forward propagate image "G"* Repeat this process with the image G: Set G as the input, and run forward propagation. * Let $a^{(G)}$ be the corresponding hidden layer activation. Content Cost Function $J_{content}(C,G)$We will define the content cost function as:$$J_{content}(C,G) = \frac{1}{4 \times n_H \times n_W \times n_C}\sum _{ \text{all entries}} (a^{(C)} - a^{(G)})^2\tag{1} $$* Here, $n_H, n_W$ and $n_C$ are the height, width and number of channels of the hidden layer you have chosen, and appear in a normalization term in the cost. * For clarity, note that $a^{(C)}$ and $a^{(G)}$ are the 3D volumes corresponding to a hidden layer's activations. * In order to compute the cost $J_{content}(C,G)$, it might also be convenient to unroll these 3D volumes into a 2D matrix, as shown below.* Technically this unrolling step isn't needed to compute $J_{content}$, but it will be good practice for when you do need to carry out a similar operation later for computing the style cost $J_{style}$. **Exercise:** Compute the "content cost" using TensorFlow. **Instructions**: The 3 steps to implement this function are:1. Retrieve dimensions from `a_G`: - To retrieve dimensions from a tensor `X`, use: `X.get_shape().as_list()`2. Unroll `a_C` and `a_G` as explained in the picture above - You'll likely want to use these functions: [tf.transpose](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/transpose) and [tf.reshape](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/reshape).3. Compute the content cost: - You'll likely want to use these functions: [tf.reduce_sum](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [tf.square](https://www.tensorflow.org/api_docs/python/tf/square) and [tf.subtract](https://www.tensorflow.org/api_docs/python/tf/subtract). 
Additional Hints for "Unrolling"* To unroll the tensor, we want the shape to change from $(m,n_H,n_W,n_C)$ to $(m, n_H \times n_W, n_C)$.* `tf.reshape(tensor, shape)` takes a list of integers that represent the desired output shape.* For the `shape` parameter, a `-1` tells the function to choose the correct dimension size so that the output tensor still contains all the values of the original tensor.* So tf.reshape(a_C, shape=[m, n_H * n_W, n_C]) gives the same result as tf.reshape(a_C, shape=[m, -1, n_C]).* If you prefer to re-order the dimensions, you can use `tf.transpose(tensor, perm)`, where `perm` is a list of integers containing the original index of the dimensions. * For example, `tf.transpose(a_C, perm=[0,3,1,2])` changes the dimensions from $(m, n_H, n_W, n_C)$ to $(m, n_C, n_H, n_W)$.* There is more than one way to unroll the tensors.* Notice that it's not necessary to use tf.transpose to 'unroll' the tensors in this case but this is a useful function to practice and understand for other situations that you'll encounter.
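A small NumPy illustration of the two hints above (separate from the graded exercise that follows):

```python
import numpy as np

m, n_H, n_W, n_C = 1, 4, 4, 3
a = np.arange(m * n_H * n_W * n_C).reshape(m, n_H, n_W, n_C)

unrolled = a.reshape(m, -1, n_C)     # shape (1, 16, 3): the -1 infers n_H * n_W
reordered = a.transpose(0, 3, 1, 2)  # shape (1, 3, 4, 4): channels moved to axis 1
print(unrolled.shape, reordered.shape)
```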
###Code
# GRADED FUNCTION: compute_content_cost
def compute_content_cost(a_C, a_G):
"""
Computes the content cost
Arguments:
a_C -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image C
a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image G
Returns:
J_content -- scalar that you compute using equation 1 above.
"""
### START CODE HERE ###
# Retrieve dimensions from a_G (≈1 line)
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape a_C and a_G (≈2 lines)
a_C_unrolled = tf.reshape(a_C, [m, n_H * n_W, n_C])
a_G_unrolled = tf.reshape(a_G, [m, n_H * n_W, n_C])
# compute the cost with tensorflow (≈1 line)
    J_content = tf.reduce_sum(tf.square(a_C_unrolled - a_G_unrolled)) / (4 * n_H * n_W * n_C)
### END CODE HERE ###
return J_content
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
a_C = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_content = compute_content_cost(a_C, a_G)
print("J_content = " + str(J_content.eval()))
###Output
J_content = 6.76559
###Markdown
**Expected Output**: **J_content** 6.76559 What you should remember- The content cost takes a hidden layer activation of the neural network, and measures how different $a^{(C)}$ and $a^{(G)}$ are. - When we minimize the content cost later, this will help make sure $G$ has similar content as $C$. 3.2 - Computing the style costFor our running example, we will use the following style image:
###Code
style_image = scipy.misc.imread("images/monet_800600.jpg")
imshow(style_image);
###Output
_____no_output_____
###Markdown
This was painted in the style of *[impressionism](https://en.wikipedia.org/wiki/Impressionism)*.Let's see how you can now define a "style" cost function $J_{style}(S,G)$. 3.2.1 - Style matrix Gram matrix* The style matrix is also called a "Gram matrix." * In linear algebra, the Gram matrix G of a set of vectors $(v_{1},\dots ,v_{n})$ is the matrix of dot products, whose entries are ${\displaystyle G_{ij} = v_{i}^T v_{j} = np.dot(v_{i}, v_{j}) }$. * In other words, $G_{ij}$ compares how similar $v_i$ is to $v_j$: If they are highly similar, you would expect them to have a large dot product, and thus for $G_{ij}$ to be large. Two meanings of the variable $G$* Note that there is an unfortunate collision in the variable names used here. We are following common terminology used in the literature. * $G$ is used to denote the Style matrix (or Gram matrix) * $G$ also denotes the generated image. * For this assignment, we will use $G_{gram}$ to refer to the Gram matrix, and $G$ to denote the generated image. Compute $G_{gram}$In Neural Style Transfer (NST), you can compute the Style matrix by multiplying the "unrolled" filter matrix with its transpose:$$\mathbf{G}_{gram} = \mathbf{A}_{unrolled} \mathbf{A}_{unrolled}^T$$ $G_{(gram)i,j}$: correlationThe result is a matrix of dimension $(n_C,n_C)$ where $n_C$ is the number of filters (channels). The value $G_{(gram)i,j}$ measures how similar the activations of filter $i$ are to the activations of filter $j$. $G_{(gram),i,i}$: prevalence of patterns or textures* The diagonal elements $G_{(gram)ii}$ measure how "active" a filter $i$ is. * For example, suppose filter $i$ is detecting vertical textures in the image. Then $G_{(gram)ii}$ measures how common vertical textures are in the image as a whole.* If $G_{(gram)ii}$ is large, this means that the image has a lot of vertical texture. By capturing the prevalence of different types of features ($G_{(gram)ii}$), as well as how much different features occur together ($G_{(gram)ij}$), the Style matrix $G_{gram}$ measures the style of an image. **Exercise**:* Using TensorFlow, implement a function that computes the Gram matrix of a matrix A. * The formula is: The gram matrix of A is $G_A = AA^T$. * You may use these functions: [matmul](https://www.tensorflow.org/api_docs/python/tf/matmul) and [transpose](https://www.tensorflow.org/api_docs/python/tf/transpose).
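Before the TensorFlow exercise, here is a tiny NumPy check (not part of the graded function) of what the Gram matrix captures, with each row of $A$ standing in for one unrolled filter:

```python
import numpy as np

A = np.array([[1., 0., 2.],    # "filter 0" activations, unrolled
              [0., 3., 1.]])   # "filter 1" activations, unrolled
G = A @ A.T
print(G)
# [[ 5.  2.]
#  [ 2. 10.]]
# Diagonal entries measure how "active" each filter is;
# off-diagonal entries measure how correlated two filters are.
```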
###Code
# GRADED FUNCTION: gram_matrix
def gram_matrix(A):
"""
Argument:
A -- matrix of shape (n_C, n_H*n_W)
Returns:
GA -- Gram matrix of A, of shape (n_C, n_C)
"""
### START CODE HERE ### (≈1 line)
GA = tf.matmul(A, tf.transpose(A))
### END CODE HERE ###
return GA
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
A = tf.random_normal([3, 2*1], mean=1, stddev=4)
GA = gram_matrix(A)
print("GA = \n" + str(GA.eval()))
###Output
GA =
[[ 6.42230511 -4.42912197 -2.09668207]
[ -4.42912197 19.46583748 19.56387138]
[ -2.09668207 19.56387138 20.6864624 ]]
###Markdown
**Expected Output**: **GA** [[ 6.42230511 -4.42912197 -2.09668207] [ -4.42912197 19.46583748 19.56387138] [ -2.09668207 19.56387138 20.6864624 ]] 3.2.2 - Style cost Your goal will be to minimize the distance between the Gram matrix of the "style" image S and the gram matrix of the "generated" image G. * For now, we are using only a single hidden layer $a^{[l]}$. * The corresponding style cost for this layer is defined as: $$J_{style}^{[l]}(S,G) = \frac{1}{4 \times {n_C}^2 \times (n_H \times n_W)^2} \sum _{i=1}^{n_C}\sum_{j=1}^{n_C}(G^{(S)}_{(gram)i,j} - G^{(G)}_{(gram)i,j})^2\tag{2} $$* $G_{gram}^{(S)}$ Gram matrix of the "style" image.* $G_{gram}^{(G)}$ Gram matrix of the "generated" image.* Remember, this cost is computed using the hidden layer activations for a particular hidden layer in the network $a^{[l]}$ **Exercise**: Compute the style cost for a single layer. **Instructions**: The 4 steps to implement this function are:1. Retrieve dimensions from the hidden layer activations a_G: - To retrieve dimensions from a tensor X, use: `X.get_shape().as_list()`2. Unroll the hidden layer activations a_S and a_G into 2D matrices, as explained in the picture above (see the images in the sections "computing the content cost" and "style matrix"). - You may use [tf.transpose](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/transpose) and [tf.reshape](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/reshape).3. Compute the Style matrix of the images S and G. (Use the function you had previously written.) 4. Compute the Style cost: - You may find [tf.reduce_sum](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [tf.square](https://www.tensorflow.org/api_docs/python/tf/square) and [tf.subtract](https://www.tensorflow.org/api_docs/python/tf/subtract) useful. Additional Hints* Since the activation dimensions are $(m, n_H, n_W, n_C)$ whereas the desired unrolled matrix shape is $(n_C, n_H*n_W)$, the order of the filter dimension $n_C$ is changed. So `tf.transpose` can be used to change the order of the filter dimension.* For the product $\mathbf{G}_{gram} = \mathbf{A} \mathbf{A}^T$, you will also need to specify the `perm` parameter for the `tf.transpose` function.
###Code
# GRADED FUNCTION: compute_layer_style_cost
def compute_layer_style_cost(a_S, a_G):
"""
Arguments:
a_S -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image S
a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image G
Returns:
J_style_layer -- tensor representing a scalar value, style cost defined above by equation (2)
"""
### START CODE HERE ###
# Retrieve dimensions from a_G (≈1 line)
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape the images to have them of shape (n_C, n_H*n_W) (≈2 lines)
a_S = tf.transpose(tf.reshape(a_S, [n_H * n_W, n_C]), [1,0])
a_G = tf.transpose(tf.reshape(a_G, [n_H * n_W, n_C]), [1,0])
# Computing gram_matrices for both images S and G (≈2 lines)
GS = gram_matrix(a_S)
GG = gram_matrix(a_G)
# Computing the loss (≈1 line)
J_style_layer = tf.reduce_sum((GS - GG) ** 2) / (2 * n_C * n_H * n_W)**2
### END CODE HERE ###
return J_style_layer
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
a_S = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_style_layer = compute_layer_style_cost(a_S, a_G)
print("J_style_layer = " + str(J_style_layer.eval()))
###Output
J_style_layer = 9.19028
###Markdown
**Expected Output**: **J_style_layer** 9.19028 3.2.3 Style Weights* So far you have captured the style from only one layer. * We'll get better results if we "merge" style costs from several different layers. * Each layer will be given weights ($\lambda^{[l]}$) that reflect how much each layer will contribute to the style.* After completing this exercise, feel free to come back and experiment with different weights to see how it changes the generated image $G$.* By default, we'll give each layer equal weight, and the weights add up to 1. ($\sum_{l}^L\lambda^{[l]} = 1$)
###Code
STYLE_LAYERS = [
('conv1_1', 0.2),
('conv2_1', 0.2),
('conv3_1', 0.2),
('conv4_1', 0.2),
('conv5_1', 0.2)]
###Output
_____no_output_____
###Markdown
You can combine the style costs for different layers as follows:$$J_{style}(S,G) = \sum_{l} \lambda^{[l]} J^{[l]}_{style}(S,G)$$where the values for $\lambda^{[l]}$ are given in `STYLE_LAYERS`. Exercise: compute style cost* We've implemented a compute_style_cost(...) function. * It calls your `compute_layer_style_cost(...)` several times, and weights their results using the values in `STYLE_LAYERS`. * Please read over it to make sure you understand what it's doing. Description of `compute_style_cost`For each layer:* Select the activation (the output tensor) of the current layer.* Get the style of the style image "S" from the current layer.* Get the style of the generated image "G" from the current layer.* Compute the "style cost" for the current layer* Add the weighted style cost to the overall style cost (J_style)Once you're done with the loop: * Return the overall style cost.
###Code
def compute_style_cost(model, STYLE_LAYERS):
"""
Computes the overall style cost from several chosen layers
Arguments:
model -- our tensorflow model
STYLE_LAYERS -- A python list containing:
- the names of the layers we would like to extract style from
- a coefficient for each of them
Returns:
J_style -- tensor representing a scalar value, style cost defined above by equation (2)
"""
# initialize the overall style cost
J_style = 0
for layer_name, coeff in STYLE_LAYERS:
# Select the output tensor of the currently selected layer
out = model[layer_name]
# Set a_S to be the hidden layer activation from the layer we have selected, by running the session on out
a_S = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model[layer_name]
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute style_cost for the current layer
J_style_layer = compute_layer_style_cost(a_S, a_G)
# Add coeff * J_style_layer of this layer to overall style cost
J_style += coeff * J_style_layer
return J_style
###Output
_____no_output_____
###Markdown
**Note**: In the inner-loop of the for-loop above, `a_G` is a tensor and hasn't been evaluated yet. It will be evaluated and updated at each iteration when we run the TensorFlow graph in model_nn() below.<!-- How do you choose the coefficients for each layer? The deeper layers capture higher-level concepts, and the features in the deeper layers are less localized in the image relative to each other. So if you want the generated image to softly follow the style image, try choosing larger weights for deeper layers and smaller weights for the first layers. In contrast, if you want the generated image to strongly follow the style image, try choosing smaller weights for deeper layers and larger weights for the first layers!--> What you should remember- The style of an image can be represented using the Gram matrix of a hidden layer's activations. - We get even better results by combining this representation from multiple different layers. - This is in contrast to the content representation, where usually using just a single hidden layer is sufficient.- Minimizing the style cost will cause the image $G$ to follow the style of the image $S$. 3.3 - Defining the total cost to optimize Finally, let's create a cost function that minimizes both the style and the content cost. The formula is: $$J(G) = \alpha J_{content}(C,G) + \beta J_{style}(S,G)$$**Exercise**: Implement the total cost function which includes both the content cost and the style cost.
###Code
# GRADED FUNCTION: total_cost
def total_cost(J_content, J_style, alpha = 10, beta = 40):
"""
Computes the total cost function
Arguments:
J_content -- content cost coded above
J_style -- style cost coded above
alpha -- hyperparameter weighting the importance of the content cost
beta -- hyperparameter weighting the importance of the style cost
Returns:
J -- total cost as defined by the formula above.
"""
### START CODE HERE ### (≈1 line)
J = alpha * J_content + beta * J_style
### END CODE HERE ###
return J
tf.reset_default_graph()
with tf.Session() as test:
np.random.seed(3)
J_content = np.random.randn()
J_style = np.random.randn()
J = total_cost(J_content, J_style)
print("J = " + str(J))
###Output
J = 35.34667875478276
###Markdown
**Expected Output**: **J** 35.34667875478276 What you should remember- The total cost is a linear combination of the content cost $J_{content}(C,G)$ and the style cost $J_{style}(S,G)$.- $\alpha$ and $\beta$ are hyperparameters that control the relative weighting between content and style. 4 - Solving the optimization problem Finally, let's put everything together to implement Neural Style Transfer!Here's what the program will have to do:1. Create an Interactive Session2. Load the content image 3. Load the style image4. Randomly initialize the image to be generated 5. Load the VGG19 model6. Build the TensorFlow graph: - Run the content image through the VGG19 model and compute the content cost - Run the style image through the VGG19 model and compute the style cost - Compute the total cost - Define the optimizer and the learning rate7. Initialize the TensorFlow graph and run it for a large number of iterations, updating the generated image at every step.Let's go through the individual steps in detail. Interactive SessionsYou've previously implemented the overall cost $J(G)$. We'll now set up TensorFlow to optimize this with respect to $G$. * To do so, your program has to reset the graph and use an "[Interactive Session](https://www.tensorflow.org/api_docs/python/tf/InteractiveSession)". * Unlike a regular session, the "Interactive Session" installs itself as the default session to build a graph. * This allows you to run variables without constantly needing to refer to the session object (calling "sess.run()"), which simplifies the code. Start the interactive session.
###Code
# Reset the graph
tf.reset_default_graph()
# Start interactive session
sess = tf.InteractiveSession()
###Output
_____no_output_____
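###Markdown
A quick added illustration (not part of the original assignment) of what the interactive session buys us: once it is installed as the default session, any tensor can be evaluated with `.eval()` instead of an explicit `sess.run(...)`.
###Code
# Added example: .eval() uses the default (interactive) session created above
x = tf.constant(2.0)
print(x.eval())   # equivalent to sess.run(x); prints 2.0
###Output
_____no_output_____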
###Markdown
Content imageLet's load, reshape, and normalize our "content" image (the Louvre museum picture):
###Code
content_image = scipy.misc.imread("images/louvre_small.jpg")
content_image = reshape_and_normalize_image(content_image)
###Output
_____no_output_____
###Markdown
Style imageLet's load, reshape and normalize our "style" image (Claude Monet's painting):
###Code
style_image = scipy.misc.imread("images/monet.jpg")
style_image = reshape_and_normalize_image(style_image)
###Output
_____no_output_____
###Markdown
Generated image correlated with content imageNow, we initialize the "generated" image as a noisy image created from the content_image.* The generated image is slightly correlated with the content image.* By initializing the pixels of the generated image to be mostly noise but slightly correlated with the content image, this will help the content of the "generated" image more rapidly match the content of the "content" image. * Feel free to look in `nst_utils.py` to see the details of `generate_noise_image(...)`; to do so, click "File-->Open..." at the upper-left corner of this Jupyter notebook.
###Code
generated_image = generate_noise_image(content_image)
imshow(generated_image[0]);
###Output
_____no_output_____
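###Markdown
For intuition, here is a minimal sketch of what a noise initialization like this could look like. This is an assumption for illustration only -- the actual implementation (and its exact noise range and `noise_ratio`) lives in `nst_utils.py`.
###Code
# Hypothetical re-implementation, for illustration; see nst_utils.py for the real one
def generate_noise_image_sketch(content_image, noise_ratio=0.6):
    # random pixels, blended with the content image so G starts loosely correlated with C
    noise_image = np.random.uniform(-20, 20, content_image.shape).astype('float32')
    return noise_image * noise_ratio + content_image * (1 - noise_ratio)
###Output
_____no_output_____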
###Markdown
Load pre-trained VGG19 modelNext, as explained in part (2), let's load the VGG19 model.
###Code
model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
###Output
_____no_output_____
###Markdown
Content CostTo get the program to compute the content cost, we will now assign `a_C` and `a_G` to be the appropriate hidden layer activations. We will use layer `conv4_2` to compute the content cost. The code below does the following:1. Assign the content image to be the input to the VGG model.2. Set a_C to be the tensor giving the hidden layer activation for layer "conv4_2".3. Set a_G to be the tensor giving the hidden layer activation for the same layer. 4. Compute the content cost using a_C and a_G.**Note**: At this point, a_G is a tensor and hasn't been evaluated. It will be evaluated and updated at each iteration when we run the Tensorflow graph in model_nn() below.
###Code
# Assign the content image to be the input of the VGG model.
sess.run(model['input'].assign(content_image))
# Select the output tensor of layer conv4_2
out = model['conv4_2']
# Set a_C to be the hidden layer activation from the layer we have selected
a_C = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
###Output
_____no_output_____
###Markdown
Style cost
###Code
# Assign the input of the model to be the "style" image
sess.run(model['input'].assign(style_image))
# Compute the style cost
J_style = compute_style_cost(model, STYLE_LAYERS)
###Output
_____no_output_____
###Markdown
Exercise: total cost* Now that you have J_content and J_style, compute the total cost J by calling `total_cost()`. * Use `alpha = 10` and `beta = 40`.
###Code
### START CODE HERE ### (1 line)
J = total_cost(J_content, J_style)
### END CODE HERE ###
###Output
_____no_output_____
###Markdown
Optimizer* Use the Adam optimizer to minimize the total cost `J`.* Use a learning rate of 2.0. * [Adam Optimizer documentation](https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer)
###Code
# define optimizer (1 line)
optimizer = tf.train.AdamOptimizer(2.0)
# define train_step (1 line)
train_step = optimizer.minimize(J)
###Output
_____no_output_____
###Markdown
Exercise: implement the model* Implement the model_nn() function. * The function **initializes** the variables of the tensorflow graph, * **assigns** the input image (initial generated image) as the input of the VGG19 model * and **runs** the `train_step` tensor (it was created in the code above this function) for a large number of steps. Hints* To initialize global variables, use this: ```Pythonsess.run(tf.global_variables_initializer())```* Run `sess.run()` to evaluate a variable.* [assign](https://www.tensorflow.org/versions/r1.14/api_docs/python/tf/assign) can be used like this:```pythonmodel["input"].assign(image)```
###Code
def model_nn(sess, input_image, num_iterations = 200):
# Initialize global variables (you need to run the session on the initializer)
### START CODE HERE ### (1 line)
sess.run(tf.global_variables_initializer())
### END CODE HERE ###
# Run the noisy input image (initial generated image) through the model. Use assign().
### START CODE HERE ### (1 line)
sess.run(model['input'].assign(input_image))
### END CODE HERE ###
for i in range(num_iterations):
# Run the session on the train_step to minimize the total cost
### START CODE HERE ### (1 line)
sess.run(train_step)
### END CODE HERE ###
# Compute the generated image by running the session on the current model['input']
### START CODE HERE ### (1 line)
generated_image = sess.run(model['input'])
### END CODE HERE ###
# Print every 20 iteration.
if i%20 == 0:
Jt, Jc, Js = sess.run([J, J_content, J_style])
print("Iteration " + str(i) + " :")
print("total cost = " + str(Jt))
print("content cost = " + str(Jc))
print("style cost = " + str(Js))
# save current generated image in the "/output" directory
save_image("output/" + str(i) + ".png", generated_image)
# save last generated image
save_image('output/generated_image.jpg', generated_image)
return generated_image
###Output
_____no_output_____
###Markdown
Run the following cell to generate an artistic image. It should take about 3min on CPU for every 20 iterations but you start observing attractive results after ≈140 iterations. Neural Style Transfer is generally trained using GPUs.
###Code
model_nn(sess, generated_image)
###Output
Iteration 0 :
total cost = 5.05035e+09
content cost = 7877.67
style cost = 1.26257e+08
Iteration 20 :
total cost = 9.43276e+08
content cost = 15186.9
style cost = 2.35781e+07
notebooks/hpatches_evaluation.ipynb
Descriptor evaluation on the HPatches dataset
###Code
import cv2
import numpy as np
import matplotlib.pyplot as plt
import brewer2mpl
from lisrd.evaluation.descriptor_evaluation import run_descriptor_evaluation
%matplotlib inline
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Illumination changes
###Code
config = {
'name': 'hpatches',
'data_path': '/home/remi/Documents/datasets/HPatches_sequences/',
'alteration': 'i',
'batch_size': 1,
'test_batch_size': 1,
'sizes': {'test': 285},
'resize': [480, 640],
'models_name': ['kornia_sift', 'hard_net', 'sos_net', 'superpoint', 'd2_net', 'r2d2', 'gift', 'lisrd'],
'num_kp': 1000,
'correctness_threshold': 3,
'max_mma_threshold': 10
}
H_estimation_i, precision_i, recall_i, mma_i = run_descriptor_evaluation(config)
metrics_i = {'Homography estimation': H_estimation_i, 'Precision': precision_i, 'Recall': recall_i}
for metric, value in metrics_i.items():
print(metric)
for m in config['models_name']:
print(m + ':', value[m])
print()
###Output
Homography estimation
kornia_sift: 0.9333333333333333
hard_net: 0.9403508771929825
sos_net: 0.9333333333333333
superpoint: 0.9122807017543859
d2_net: 0.9052631578947369
r2d2: 0.9368421052631579
gift: 0.9438596491228071
lisrd: 0.9473684210526315
Precision
kornia_sift: 0.7814668489337294
hard_net: 0.7018632301523557
sos_net: 0.747846007673818
superpoint: 0.7097122272108113
d2_net: 0.7250510103286282
r2d2: 0.7713281612812792
gift: 0.7014761240791423
lisrd: 0.7654595787015849
Recall
kornia_sift: 0.7985949067103927
hard_net: 0.7312564340404616
sos_net: 0.8211819059440518
superpoint: 0.8110394621011607
d2_net: 0.7750460070551969
r2d2: 0.8142594637050821
gift: 0.6807199062715257
lisrd: 0.919741569466567
###Markdown
Viewpoint changes
###Code
config['alteration'] = 'v'
config['sizes'] = {'test': 295}
H_estimation_v, precision_v, recall_v, mma_v = run_descriptor_evaluation(config)
metrics_v = {'Homography estimation': H_estimation_v, 'Precision': precision_v, 'Recall': recall_v}
for metric, value in metrics_v.items():
print(metric)
for m in config['models_name']:
print(m + ':', value[m])
print()
###Output
Homography estimation
kornia_sift: 0.5661016949152542
hard_net: 0.6644067796610169
sos_net: 0.6983050847457627
superpoint: 0.6711864406779661
d2_net: 0.6169491525423729
r2d2: 0.6203389830508474
gift: 0.6983050847457627
lisrd: 0.688135593220339
Precision
kornia_sift: 0.6506764346362675
hard_net: 0.7006170261724549
sos_net: 0.7271578338500188
superpoint: 0.6852562871168503
d2_net: 0.6662432027554812
r2d2: 0.6661881245757615
gift: 0.6865851404683905
lisrd: 0.7305360618193362
Recall
kornia_sift: 0.5272233342870664
hard_net: 0.7335794158486386
sos_net: 0.7601566467915619
superpoint: 0.7502009544127654
d2_net: 0.6641766541573937
r2d2: 0.6389377317008912
gift: 0.6599955223070748
lisrd: 0.7573480641226967
###Markdown
MMA curves
###Code
def plot_mma(config, captions, mma_i, mma_v):
models = config['models_name']
n_models = len(models)
colors = np.array(brewer2mpl.get_map('Set2', 'qualitative', 8).mpl_colors)[:n_models]
linestyles = ['-'] * n_models
plt_lim = [1, config['max_mma_threshold']]
plt_rng = np.arange(plt_lim[0], plt_lim[1] + 1)
plt.rc('axes', titlesize=25)
plt.rc('axes', labelsize=25)
plt.figure(figsize=(15, 5))
# Full evaluation
plt.subplot(1, 3, 1)
n_i = 285
n_v = 295
for model, caption, color, ls in zip(models, captions, colors, linestyles):
i_err, v_err = mma_i[model], mma_v[model]
plt.plot(plt_rng, (n_i * i_err + n_v * v_err) / (n_i + n_v),
color=color, ls=ls, linewidth=3, label=caption)
plt.title('Overall')
plt.xlabel('Threshold [px]')
plt.xlim(plt_lim)
plt.xticks(plt_rng)
plt.ylabel('Precision')
plt.ylim([0.3, 1])
plt.grid()
plt.tick_params(axis='both', which='major', labelsize=20)
# Plot for the illumination split
plt.subplot(1, 3, 2)
for model, caption, color, ls in zip(models, captions, colors, linestyles):
plt.plot(plt_rng, mma_i[model], color=color, ls=ls, linewidth=3, label=caption)
plt.title('Illumination')
plt.xlabel('Threshold [px]')
plt.xlim(plt_lim)
plt.xticks(plt_rng)
plt.ylim([0.3, 1])
plt.gca().axes.set_yticklabels([])
plt.grid()
plt.tick_params(axis='both', which='major', labelsize=20)
# Plot for the viewpoint split
plt.subplot(1, 3, 3)
for model, caption, color, ls in zip(models, captions, colors, linestyles):
plt.plot(plt_rng, mma_v[model], color=color, ls=ls, linewidth=3, label=caption)
plt.title('Viewpoint')
plt.xlabel('Threshold [px]')
plt.xlim(plt_lim)
plt.xticks(plt_rng)
plt.ylim([0.3, 1])
plt.gca().axes.set_yticklabels([])
plt.grid()
plt.tick_params(axis='both', which='major', labelsize=20)
plt.legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0, fontsize=20)
captions = ['Root SIFT', 'HardNet', 'SOSNet', 'SuperPoint', 'D2-Net', 'R2D2', 'GIFT', 'LISRD']
plot_mma(config, captions, mma_i, mma_v)
###Output
_____no_output_____
10. class.ipynb
Introduction to Class and object with OOPs in Python 1. Class2. Object3. Methods4. Inheritance5. Encapsulation6. Polymorphism7. Key Points to Remember ClassA class is a blueprint for the object.1. We can think of a class as a sketch of a parrot with labels. 2. It contains all the details about the name, colors, size etc. 3. Based on these descriptions, we can study the parrot. Here, the parrot is an object.An example of a class for a parrot:
###Code
class Parrot:
pass
###Output
_____no_output_____
###Markdown
Here, we use the class keyword to define an empty class Parrot. From a class, we construct instances. An instance is a specific object created from a particular class. ObjectAn object (instance) is an instantiation of a class. When a class is defined, only the description for the object is defined. Therefore, no memory or storage is allocated.An example of an object of the parrot class:
###Code
obj = Parrot()
###Output
_____no_output_____
###Markdown
Here, obj is an object of class Parrot.Suppose we have details of a parrot. Now, we are going to show how to build the class and objects of a parrot.
###Code
class Pythno:
x= [5,2,3,4,10,20,30]
y= [2,3,4,5,6]
c =x+y
#print(x)
p1 = Pythno()
print(p1.x,p1.y,p1.c)
class myclass:
x = "hello"
y = "World"
z = x+y
v = myclass()
print(v.z)
print(v.x)
class Person:
def __init__(self,name,age):
self.name = name
self.age = age
def myfunction(self):
print("hello my name is " + self.name)
#print("My age is" +self.age)
p1 = Person ("Reddy", 27)
p1.myfunction()
print(p1.name)
print(p1.age)
class Parrot:
# class attribute
species = "bird"
# instance attribute
def __init__(self, name, age):
self.name = name
self.age = age
# instantiate the Parrot class
blu = Parrot("Blu", 10)
woo = Parrot("Woo", 15)
# access the class attributes
print("Blu is a {}".format(blu.__class__.species))
print("Woo is also a {}".format(woo.__class__.species))
# access the instance attributes
print("{} is {} years old".format( blu.name, blu.age))
print("{} is {} years old".format( woo.name, woo.age))
class Add:
x = 2
y = 3
z = x+y
p1 = Add()
print(p1.z)
class sub:
x = 2
y = 3
z = x-y
p = sub()
print(p.z)
###Output
5
-1
###Markdown
MethodsMethods are functions defined inside the body of a class. They are used to define the behaviors of an object.
###Code
class Parrot:
# instance attributes
def __init__(self, name, age):
self.name = name
self.age = age
# instance method
def sing(self, song):
return "{} sings {}".format(self.name, song)
def dance(self):
return "{} is now dancing".format(self.name)
# instantiate the object
blu = Parrot("Blu", 10)
# call our instance methods
print(blu.sing("'Happy'"))
print(blu.dance())
###Output
Blu sings 'Happy'
Blu is now dancing
###Markdown
Introduction to OOPs in PythonPython is a multi-paradigm programming language, meaning it supports different programming approaches.One of the popular approaches to solving a programming problem is by creating objects. This is known as Object-Oriented Programming (OOP).An object has two characteristics:attributesbehaviorLet's take an example:a parrot is an object;name, age, color are attributes;singing, dancing are behaviors.The concept of OOP in Python focuses on creating reusable code. This concept is also known as DRY (Don't Repeat Yourself). InheritanceInheritance is a way of creating a new class using the details of an existing class without modifying it. The newly formed class is a derived class (or child class). Similarly, the existing class is a base class (or parent class).
###Code
# parent class
class Bird:
def __init__(self):
print("Bird is ready")
def whoisThis(self):
print("Bird")
def swim(self):
print("Swim faster")
# child class
class Penguin(Bird):
def __init__(self):
# call super() function
super().__init__()
print("Penguin is ready")
def whoisThis(self):
print("Penguin")
def run(self):
print("Run faster")
peggy = Penguin()
peggy.whoisThis()
peggy.swim()
peggy.run()
###Output
Bird is ready
Penguin is ready
Penguin
Swim faster
Run faster
###Markdown
In the above program, we created two classes, i.e. Bird (parent class) and Penguin (child class). The child class inherits the functions of the parent class. We can see this from the swim() method. Again, the child class modified the behavior of the parent class. We can see this from the whoisThis() method. Furthermore, we extend the functions of the parent class by creating a new run() method.Additionally, we use the super() function inside the __init__() method. This is because we want to pull the content of the __init__() method from the parent class into the child class. EncapsulationUsing OOP in Python, we can restrict access to methods and variables. This prevents data from direct modification, which is called encapsulation. In Python, we denote private attributes using an underscore prefix, i.e. a single " _ " or double " __".
###Code
class Computer:
def __init__(self):
self.__maxprice = 900
def sell(self):
print("Selling Price: {}".format(self.__maxprice))
def setMaxPrice(self, price):
self.__maxprice = price
c = Computer()
c.sell()
# change the price
c.__maxprice = 1000
c.sell()
# using setter function
c.setMaxPrice(1000)
c.sell()
###Output
Selling Price: 900
Selling Price: 900
Selling Price: 1000
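###Markdown
What actually happens above is name mangling rather than strict privacy: inside the class, `__maxprice` is rewritten to `_Computer__maxprice`, which is why `c.__maxprice = 1000` silently created a new, unrelated attribute. A small added demo (not in the original notebook):
###Code
comp = Computer()
print(comp._Computer__maxprice)   # 900 -- the "private" attribute under its mangled name
comp._Computer__maxprice = 1000   # it can still be modified through the mangled name
comp.sell()                       # Selling Price: 1000
###Output
_____no_output_____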
###Markdown
In the above program, we defined a class Computer. We use the __init__() method to store the maximum selling price of a computer. We tried to modify the price. However, we can't change it because Python treats __maxprice as a private attribute. To change the value, we used a setter function, i.e. setMaxPrice(), which takes the price as a parameter. PolymorphismPolymorphism is the ability (in OOP) to use a common interface for multiple forms (data types).Suppose we need to color a shape; there are multiple shape options (rectangle, square, circle). However, we could use the same method to color any shape. This concept is called Polymorphism.
###Code
class Parrot:
def fly(self):
print("Parrot can fly")
def swim(self):
print("Parrot can't swim")
class Penguin:
def fly(self):
print("Penguin can't fly")
def swim(self):
print("Penguin can swim")
# common interface
def flying_test(bird):
bird.fly()
#instantiate objects
blu = Parrot()
peggy = Penguin()
# passing the object
flying_test(blu)
flying_test(peggy)
###Output
Parrot can fly
Penguin can't fly
###Markdown
In the above program, we defined two classes, Parrot and Penguin. Each of them has a common fly() method. However, their behaviors are different. To allow polymorphism, we created a common interface, i.e. the flying_test() function, which can take any object. Then we passed the objects blu and peggy to the flying_test() function, and it ran effectively. Key Points to Remember:1. The programming gets easy and efficient.2. The class is sharable, so code can be reused.3. The productivity of programmers increases4. Data is safe and secure with data abstraction. CLASS and OBJECT1. What are classes and objects in Python?2. Defining a Class in Python3. Creating an Object in Python4. Constructors in Python5. Deleting Attributes and Objects What are classes and objects in Python?Python is an object-oriented programming language. Unlike procedure-oriented programming, where the main emphasis is on functions, object-oriented programming stresses objects.An object is simply a collection of data (variables) and methods (functions) that act on that data. And a class is a blueprint for the object.We can think of a class as a sketch (prototype) of a house. It contains all the details about the floors, doors, windows etc. Based on these descriptions we build the house. The house is the object.As many houses can be made from a description, we can create many objects from a class. An object is also called an instance of a class and the process of creating this object is called instantiation. Defining a Class in PythonLike function definitions, which begin with the keyword def, in Python we define a class using the keyword class.The first string is called the docstring and has a brief description of the class. Although not mandatory, this is recommended.Here is a simple class definition.
###Code
class MyNewClass:
'''This is a docstring. I have created a new class'''
pass
###Output
_____no_output_____
###Markdown
A class creates a new local namespace where all its attributes are defined. Attributes may be data or functions.There are also special attributes in it that begin with double underscores (__). For example, __doc__ gives us the docstring of that class.As soon as we define a class, a new class object is created with the same name. This class object allows us to access the different attributes as well as to instantiate new objects of that class.
###Code
class MyClass:
"This is my second class"
a = 10
def func(self):
print('Hello')
# Output: 10
print(MyClass.a)
# Output: <function MyClass.func at 0x0000000003079BF8>
print(MyClass.func)
# Output: 'This is my second class'
print(MyClass.__doc__)
###Output
10
<function MyClass.func at 0x000002174B825BF8>
This is my second class
###Markdown
Creating an Object in PythonWe saw that the class object could be used to access different attributes.It can also be used to create new object instances (instantiation) of that class. The procedure to create an object is similar to a function call.
###Code
ob = MyClass()
###Output
_____no_output_____
###Markdown
This will create a new instance object named ob. We can access attributes of objects using the object name prefix.Attributes may be data or methods. Methods of an object are the corresponding functions of that class. Any function object that is a class attribute defines a method for objects of that class.This means that, since MyClass.func is a function object (an attribute of the class), ob.func will be a method object.
###Code
class MyClass:
"This is my second class"
a = 10
def func(self):
print('Hello')
# create a new MyClass
ob = MyClass()
# Output: <function MyClass.func at 0x000000000335B0D0>
print(MyClass.func)
# Output: <bound method MyClass.func of <__main__.MyClass object at 0x000000000332DEF0>>
print(ob.func)
# Calling function func()
# Output: Hello
ob.func()
###Output
<function MyClass.func at 0x000002174B82D0D0>
<bound method MyClass.func of <__main__.MyClass object at 0x000002174B828E80>>
Hello
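###Markdown
Notice that ob.func() worked without passing any argument even though func() is defined with a self parameter. A quick added check (not in the original notebook) that the two call forms are the same:
###Code
# ob.func() and MyClass.func(ob) are equivalent: the instance is passed as self
MyClass.func(ob)   # prints: Hello
ob.func()          # prints: Hello
###Output
_____no_output_____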
###Markdown
You may have noticed the self parameter in the function definition inside the class, but we called the method simply as ob.func() without any arguments. It still worked.This is because, whenever an object calls its method, the object itself is passed as the first argument. So, ob.func() translates into MyClass.func(ob).In general, calling a method with a list of n arguments is equivalent to calling the corresponding function with an argument list that is created by inserting the method's object before the first argument.For these reasons, the first argument of the function in a class must be the object itself. This is conventionally called self. It can be named otherwise, but we highly recommend following the convention.Now you must be familiar with class objects, instance objects, function objects, method objects and their differences. Constructors in PythonClass functions that begin with a double underscore (__) are called special functions as they have special meaning.Of particular interest is the __init__() function. This special function gets called whenever a new object of that class is instantiated.This type of function is also called a constructor in Object-Oriented Programming (OOP). We normally use it to initialize all the variables.
###Code
class ComplexNumber:
def __init__(self,r = 0,i = 0):
self.real = r
self.imag = i
def getData(self):
print("{0}+{1}j".format(self.real,self.imag))
# Create a new ComplexNumber object
c1 = ComplexNumber(2,3)
# Call getData() function
# Output: 2+3j
c1.getData()
# Create another ComplexNumber object
# and create a new attribute 'attr'
c2 = ComplexNumber(5)
c2.attr = 10
# Output: (5, 0, 10)
print((c2.real, c2.imag, c2.attr))
# but c1 object doesn't have attribute 'attr'
# AttributeError: 'ComplexNumber' object has no attribute 'attr'
c1.attr
###Output
2+3j
(5, 0, 10)
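###Markdown
An added aside (not in the original notebook): each instance carries its own attribute dictionary, which makes the on-the-fly attribute visible.
###Code
print(c1.__dict__)   # expected: {'real': 2, 'imag': 3}
print(c2.__dict__)   # expected: {'real': 5, 'imag': 0, 'attr': 10}
###Output
_____no_output_____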
###Markdown
In the above example, we define a new class to represent complex numbers. It has two functions, __init__() to initialize the variables (defaults to zero) and getData() to display the number properly.An interesting thing to note in the above step is that attributes of an object can be created on the fly. We created a new attribute attr for object c2 and we read it as well. But this did not create that attribute for object c1. Deleting Attributes and ObjectsAny attribute of an object can be deleted anytime, using the del statement. Try the following on the Python shell to see the output.
###Code
c1 = ComplexNumber(2,3)
del c1.imag
c1.getData()
del ComplexNumber.getData
c1.getData()
###Output
_____no_output_____
###Markdown
We can even delete the object itself, using the del statement.
###Code
c1 = ComplexNumber(1,3)
del c1
c1
###Output
_____no_output_____
###Markdown
Actually, it is more complicated than that. When we do c1 = ComplexNumber(1,3), a new instance object is created in memory and the name c1 binds with it.On the command del c1, this binding is removed and the name c1 is deleted from the corresponding namespace. The object however continues to exist in memory and if no other name is bound to it, it is later automatically destroyed.This automatic destruction of unreferenced objects in Python is also called garbage collection. Inheritance:1. What is Inheritance?2. Python Inheritance Syntax 1. Example of Inheritance in Python3. Method Overriding in Python What is Inheritance?Inheritance is a powerful feature in object oriented programming.It refers to defining a new class with little or no modification to an existing class. The new class is called derived (or child) class and the one from which it inherits is called the base (or parent) class.
###Code
class BaseClass:
    pass  # body of the base class
class DerivedClass(BaseClass):
    pass  # body of the derived class
###Output
_____no_output_____
###Markdown
The derived class inherits features from the base class, adding new features to it. This results in reusability of code. Example of Inheritance in PythonTo demonstrate the use of inheritance, let us take an example.A polygon is a closed figure with 3 or more sides. Say, we have a class called Polygon defined as follows.
###Code
class Polygon:
def __init__(self, no_of_sides):
self.n = no_of_sides
self.sides = [0 for i in range(no_of_sides)]
def inputSides(self):
self.sides = [float(input("Enter side "+str(i+1)+" : ")) for i in range(self.n)]
def dispSides(self):
for i in range(self.n):
print("Side",i+1,"is",self.sides[i])
###Output
_____no_output_____
###Markdown
This class has data attributes to store the number of sides, n, and the magnitude of each side as a list, sides.The method inputSides() takes in the magnitude of each side and, similarly, dispSides() displays them properly.A triangle is a polygon with 3 sides. So, we can create a class called Triangle that inherits from Polygon. This makes all the attributes of class Polygon readily available in Triangle. We don't need to define them again (code reusability). Triangle is defined as follows.
###Code
class Triangle(Polygon):
def __init__(self):
Polygon.__init__(self,3)
def findArea(self):
a, b, c = self.sides
# calculate the semi-perimeter
s = (a + b + c) / 2
area = (s*(s-a)*(s-b)*(s-c)) ** 0.5
print('The area of the triangle is %0.2f' %area)
t = Triangle()
t.inputSides()
###Output
Enter side 1 : 3
Enter side 2 : 5
Enter side 3 : 4
###Markdown
However, class Triangle has a new method findArea() to find and print the area of the triangle. Here is a sample run.
###Code
t.dispSides()
t.findArea()
###Output
The area of the triangle is 6.00
###Markdown
We can see that, even though we did not define methods like inputSides() or dispSides() for class Triangle, we were able to use them.If an attribute is not found in the class, the search continues to the base class. This repeats recursively if the base class is itself derived from other classes. Method Overriding in PythonIn the above example, notice that the __init__() method was defined in both classes, Triangle as well as Polygon. When this happens, the method in the derived class overrides that in the base class. This is to say, __init__() in Triangle gets preference over the same in Polygon.Generally, when overriding a base method, we tend to extend the definition rather than simply replace it. The same is done by calling the method of the base class from the one in the derived class (calling Polygon.__init__() from __init__() in Triangle).A better option would be to use the built-in function super(). So, super().__init__(3) is equivalent to Polygon.__init__(self,3) and is preferred. You can learn more about the super() function in Python.Two built-in functions, isinstance() and issubclass(), are used to check inheritance. The function isinstance() returns True if the object is an instance of the class or other classes derived from it. Each and every class in Python inherits from the base class object.
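Before the isinstance()/issubclass() checks, here is a small added sketch (not in the original notebook) of the preferred super() form described above:
###Code
# Same Triangle as before, but delegating to the base class via super()
class TriangleWithSuper(Polygon):
    def __init__(self):
        super().__init__(3)   # equivalent to Polygon.__init__(self, 3)
###Output
_____no_output_____
###Markdown
The checks below are run on the original Triangle instance t.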
###Code
isinstance(t,Triangle)        # True
isinstance(t,Polygon)         # True -- Triangle is derived from Polygon
isinstance(t,int)             # False
isinstance(t,object)          # True -- every class inherits from object
issubclass(Polygon,Triangle)  # False
issubclass(Triangle,Polygon)  # True
issubclass(bool,int)          # True -- bool is a subclass of int
###Output
_____no_output_____
###Markdown
Python Multiple Inheritance 1. Multiple Inheritance in Python2. Multilevel Inheritance in Python3. Method Resolution Order in Python Multiple Inheritance in PythonLike C++, a class can be derived from more than one base class in Python. This is called multiple inheritance.In multiple inheritance, the features of all the base classes are inherited into the derived class. The syntax for multiple inheritance is similar to single inheritance.
###Code
class Base1:
pass
class Base2:
pass
class MultiDerived(Base1, Base2):
pass
###Output
_____no_output_____
###Markdown
Here, MultiDerived is derived from classes Base1 and Base2. Multilevel Inheritance in PythonOn the other hand, we can also inherit from a derived class. This is called multilevel inheritance. It can be of any depth in Python.In multilevel inheritance, features of the base class and the derived class are inherited into the new derived class.An example is given below.
###Code
class Base:
pass
class Derived1(Base):
pass
class Derived2(Derived1):
pass
###Output
_____no_output_____
###Markdown
Method Resolution Order in PythonEvery class in Python is derived from the class object. It is the most basic type in Python.So technically, all other classes, either built-in or user-defined, are derived classes and all objects are instances of the object class.
###Code
# Output: True
print(issubclass(list,object))
# Output: True
print(isinstance(5.5,object))
# Output: True
print(isinstance("Hello",object))
class X: pass
class Y: pass
class Z: pass
class A(X,Y): pass
class B(Y,Z): pass
class M(B,A,Z): pass
# Output:
# [<class '__main__.M'>, <class '__main__.B'>,
# <class '__main__.A'>, <class '__main__.X'>,
# <class '__main__.Y'>, <class '__main__.Z'>,
# <class 'object'>]
print(M.mro())
###Output
[<class '__main__.M'>, <class '__main__.B'>, <class '__main__.A'>, <class '__main__.X'>, <class '__main__.Y'>, <class '__main__.Z'>, <class 'object'>]
Solution_2_basic_U-Net.ipynb
ref: https://www.kaggle.com/erikistre/pytorch-basic-u-net Competition ranking---Fight!!! The approach matters most basic u-net: overfits badly, no test-time averaging, 0.835 threshold FAQ: if you use a U-Net-style network, which typically downsamples 5 times, the input width and height must be multiples of 32; our raw data is 101x101, so we resize to the multiple of 32 closest to 101, i.e. 128x128; later we can also try padding to 128 instead of rescaling Entry points for improvement: (keep replacements modular, and keep the code simple and the logic clear so it is easy to modify) * Data processing * If a pretrained model is used, the image normalization must match the one it was trained with * Use a zero-padding style of resize, augment the data with left-right horizontal flips, and average predictions over left-right flips at test time ``` python resize image (101,101) -> (128,128) def image_in(img): out = np.zeros((128,128), dtype=np.uint8) ih = img[:, ::-1] iv = img[::-1, :] ihv= ih[::-1, :] out[13:114, 13:114] = img out[0:13,0:13] = ihv[-13:,-13:] out[0:13,13:114] = iv[-13:,:] out[0:13, -14:] = ihv[-13:, 0:14] out[13:114, 0:13] = ih[:,-13:] out[-14:, 0:13] = ihv[0:14:,-13:] out[-14:,13:114] = iv[0:14,:] out[-14:,-14:] = ihv[0:14,0:14] out[13:114,-14:] =ih[:,0:14] return(out) go back to original size (128,128) -> (101,101) def image_out(img): return img[13:114,13:114] for 4 dims use [:,13:114,13:114,:] instead using heavy augmentations, flip left-right is simple and maybe is the only method allowed def flip(): X_aug = X_train[:,:,::-1,:] y_aug = y_train[:,:,::-1,:] this is tricky having pytorch model trained, even with augmented left-right data, try predict with normal and mirrored X_test and take the average def dummy_prediction(): ... y_preds = model.predict(X_test, verbose=0) if mirror==True: m_preds = model.predict(X_test[:,:,::-1,:], verbose=0) y_preds = 0.5 * (y_preds + m_preds[:,:,::-1,:]) ``` * Network architecture design * Look up papers from related Kaggle competitions and on Google Scholar (these papers are usually 5-6 pages), and find open-source PyTorch code on GitHub * Decoding the logits and computing the loss against the ground truth * Design a multi-task loss function * Optimizer and learning-rate schedule settings * Fast.ai's toolkit for faster convergence * Multi-stage learning rates * Various optimizers * Added dropout to the model which seems to improve performance. * Training method: first freeze the encoder parameters and train the decoder, then fine-tune the whole network * Save and load parameters, log information, and add some timing code to the training and testing phases Load Libraries
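The Keras-style dummy_prediction pseudo-code above can be ported to PyTorch; here is a minimal sketch (added, with assumed names and an assumed sigmoid decoding of the logits):
###Code
# Hedged sketch of test-time flip averaging in PyTorch (not from the original notebook)
import torch

def predict_with_flip_tta(net, x):
    # x: (N, C, H, W) float tensor; net is assumed to return per-pixel logits
    with torch.no_grad():
        y = torch.sigmoid(net(x))
        y_mirror = torch.sigmoid(net(torch.flip(x, dims=[3])))  # mirror the width axis
        return 0.5 * (y + torch.flip(y_mirror, dims=[3]))       # un-mirror, then average
###Output
_____no_output_____
###Markdown
Averaging the prediction with its un-mirrored twin is exactly the "predict with normal and mirrored X_test and take the average" trick described above.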
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import os
from glob import glob
import sys
import random
from tqdm import tqdm_notebook # use tqdm_notebook when showing progress bars inside a jupyter notebook
from skimage.io import imread, imshow
from skimage.transform import resize
from sklearn.metrics import jaccard_similarity_score
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as dsets
from torch.autograd import Variable
###Output
_____no_output_____
###Markdown
Image Preparation
###Code
# Set some parameters
im_width = 128
im_height = 128
im_chan = 3 # number of channels; the images are RGB, though the three channels carry identical values
path_train = 'TrainData'
path_test = 'TestData'
train_path_images = os.path.abspath(path_train + "/images/")
train_path_masks = os.path.abspath(path_train + "/masks/")
test_path_images = os.path.abspath(path_test + "/images/")
test_path_masks = os.path.abspath(path_test + "/masks/")
train_path_images_list = glob(os.path.join(train_path_images, "*.png"))
train_path_masks_list = glob(os.path.join(train_path_masks, "*.png"))
test_path_images_list = glob(os.path.join(test_path_images, "*.png"))
test_path_masks_list = glob(os.path.join(test_path_masks, "*.png"))
# Display a few images from the training set
ids= ['1f1cc6b3a4','5b7c160d0d','6c40978ddf','7dfdf6eeb8','7e5a6e5013']
plt.figure(figsize=(20,10))
for j, img_name in enumerate(ids):
q = j+1
img = imread(train_path_images + "/" + img_name + '.png')
print(img.shape)
img_mask = imread(train_path_masks + "/" + img_name + '.png')
print(img_mask.shape)
plt.subplot(1,2*(1+len(ids)),q*2-1)
plt.imshow(img)
plt.subplot(1,2*(1+len(ids)),q*2)
plt.imshow(img_mask)
plt.show()
print(img) # the R, G and B values are identical at every pixel, so a single channel would suffice (compressed storage)
train_ids = next(os.walk(train_path_images))[2]
test_ids = next(os.walk(test_path_images))[2]
print(len(list(train_ids))) # generator of filename strings like "xxxxxxxxx.png"
print(len(list(test_ids))) # same as above
#--------------- Get Training Images and resize train images and masks ------------------------------
X_train = np.zeros((len(train_ids), im_height, im_width, im_chan), dtype=np.uint8)
Y_train = np.zeros((len(train_ids), im_height, im_width, 1), dtype=np.bool_)
print('Getting and resizing train images and masks ... ')
sys.stdout.flush()
for n, id_ in tqdm_notebook(enumerate(train_ids), total=len(train_ids)):
img = imread(path_train + '/images/' + id_)
if n==1:
print("raw img shape:",img.shape) # 打印其中的1张图片的原始shape # 由于颜色数组中的RGB上对应的位置都相同 故可以只保存1个通道 压缩保存
x = resize(img, (128, 128, 3), mode='constant', preserve_range=True) # 缩放至128x128x1
X_train[n] = x
mask = imread(path_train + '/masks/' + id_)
Y_train[n] = resize(mask, (128, 128, 1),
mode='constant',
preserve_range=True)
print('Done!')
print('X_train:',X_train.shape)
print("Y_train:",Y_train.shape)
# Check if training data looks all right
ix =random.randint(0, len(train_ids))
print((X_train[ix].shape))
bb=X_train[ix].copy()
plt.imshow(bb) # bb already holds 3 (identical) channels, so it displays directly
plt.show()
tmp = np.squeeze(Y_train[ix]).astype(np.float32)
print("mask:",tmp)
cc=np.dstack((tmp,tmp,tmp))
print(cc.shape)
plt.imshow(cc)
plt.show()
bb=bb.transpose(2,0,1)
cc=cc.transpose(2,0,1)
# instead of using heavy augmentations,
# a left-right flip is simple and maybe the only method allowed
def flip():
# X_aug = bb[:,::-1,:]
# y_aug = cc[:,::-1,:]
X_aug = bb[:,:,::-1]
y_aug = cc[:,:,::-1]
return X_aug,y_aug
X_aug,y_aug=flip() #[c,h,w]
X_aug=X_aug.transpose(1,2,0)
y_aug=y_aug.transpose(1,2,0)
print((X_aug.shape))
plt.imshow(X_aug) # the flipped image also holds 3 (identical) channels, so it displays directly
plt.show()
print(y_aug.shape)
plt.imshow(y_aug)
plt.show()
###Output
(128, 128, 3)
###Markdown
Prepare Images for Pytorch
###Code
# A custom PyTorch dataset must subclass torch.utils.data.Dataset
# https://stackoverflow.com/questions/50052295/how-do-you-load-images-into-pytorch-dataloader
class saltIDDataset(torch.utils.data.Dataset):
def __init__(self,preprocessed_images,train=True, preprocessed_masks=None):
"""
Args:
text_file(string): path to text file
root_dir(string): directory with all train images
"""
self.train = train
self.images = preprocessed_images
if self.train:
self.masks = preprocessed_masks
def __len__(self):
return len(self.images)
    def __getitem__(self, idx):
        image = self.images[idx]
        if self.train:
            mask = self.masks[idx]
            return (image, mask)
        # test mode: return only the image (a None mask would break the default collate_fn)
        return image
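
# Quick usage sketch (hypothetical toy arrays, four 1x1 "images"):
# _ds = saltIDDataset(np.zeros((4, 1, 1, 1), dtype=np.float32), train=True,
#                     preprocessed_masks=np.zeros((4, 1, 1, 1), dtype=np.float32))
# len(_ds) -> 4;  _ds[0] -> (image, mask) pair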
###Output
_____no_output_____
###Markdown
Input normalization. Note: if a pretrained model is used, the normalization below is not appropriate (it gives poor results); use the pretrained model's standard preprocessing code instead.
###Code
from torchvision.transforms import ToTensor, Normalize, Compose
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# img_transform = Compose([
# ToTensor(),
# Normalize(mean=[0.485,], std=[0.229,])
# ])
img_transform = Compose([
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
X_train_shaped = X_train.copy().reshape(-1, 3, 128, 128)/255.0 # linear scaling into the (0,1) range
Y_train_shaped = Y_train.copy().reshape(-1, 1, 128, 128)
X_train_shaped = X_train_shaped.astype(np.float32)
Y_train_shaped = Y_train_shaped.astype(np.float32)
X_train_shaped.shape
X_train_shaped=X_train_shaped.transpose(0,2,3,1)
Y_train_shaped=Y_train_shaped.transpose(0,2,3,1)
print("Before:",X_train_shaped.shape)
X_train_shaped_1=np.zeros((X_train_shaped.shape[0],X_train_shaped.shape[3],X_train_shaped.shape[1],X_train_shaped.shape[2]),dtype=np.float32)
Y_train_shaped_1=np.zeros((Y_train_shaped.shape[0],Y_train_shaped.shape[3],Y_train_shaped.shape[1],Y_train_shaped.shape[2]),dtype=np.float32)
for idx,sample in tqdm_notebook(enumerate(X_train_shaped),total=len(X_train_shaped)):
if idx==0:
        print(img_transform(sample).shape) # the input image is [H,W,C]; the transform returns [C,H,W]
#X_train_shaped_1[idx]=img_transform(sample)
X_train_shaped_1[idx]= sample.transpose(2,0,1)
for idx,sample in tqdm_notebook(enumerate(Y_train_shaped),total=len(Y_train_shaped)):
    Y_train_shaped_1[idx]= sample.transpose(2,0,1) # do not apply standard normalization to the masks
print(X_train_shaped_1.shape)
print(X_train_shaped_1[0])
print(Y_train_shaped_1.shape)
print(Y_train_shaped_1[0])
###Output
_____no_output_____
###Markdown
Data Augmentation
###Code
X_train_shaped_2 =X_train_shaped_1.copy()
Y_train_shaped_2=Y_train_shaped_1.copy()
print(X_train_shaped_2 is X_train_shaped_1)
print(Y_train_shaped_2 is Y_train_shaped_1)
def Flip(X,Y):
# X_aug = bb[:,::-1,:]
# y_aug = cc[:,::-1,:]
X_aug = X[:,:,:,::-1]
y_aug = Y[:,:,:,::-1]
return X_aug,y_aug
X_train_shaped_2,Y_train_shaped_2=Flip(X_train_shaped_2,Y_train_shaped_2)
X_train_shaped_3=np.concatenate([X_train_shaped_1,X_train_shaped_2],axis=0)
Y_train_shaped_3=np.concatenate([Y_train_shaped_1,Y_train_shaped_2],axis=0)
print(X_train_shaped_3.shape)
print(Y_train_shaped_3.shape)
###Output
(8000, 3, 128, 128)
(8000, 1, 128, 128)
###Markdown
Set fixed global random seeds to make runs reproducible
###Code
torch.manual_seed(4200) # CPU seed as well, for full reproducibility
torch.cuda.manual_seed_all(4200)
np.random.seed(133700)
indices = list(range(len(X_train_shaped_3)))
np.random.shuffle(indices) # shuffle the sample indices
val_size = 1/10 # hold out 1/10 of the training samples as the validation set
split = np.int_(np.floor(val_size * len(X_train_shaped_3)))
print("How many samples are used as validation set:",split)
train_idxs = indices[split:]
val_idxs = indices[:split]
###Output
How many samples are used as validation set: 800
###Markdown
Build the data loaders
###Code
X_train_shaped_3[train_idxs].shape
Y_train_shaped_3[train_idxs].shape
salt_ID_dataset_train = saltIDDataset(X_train_shaped_3[train_idxs],
train=True,
preprocessed_masks=Y_train_shaped_3[train_idxs])
salt_ID_dataset_val = saltIDDataset(X_train_shaped_3[val_idxs],
train=True,
preprocessed_masks=Y_train_shaped_3[val_idxs])
batch_size = 64
train_loader = torch.utils.data.DataLoader(dataset=salt_ID_dataset_train,
batch_size=batch_size,
shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=salt_ID_dataset_val,
batch_size=batch_size,
shuffle=False)
###Output
_____no_output_____
###Markdown
Define the model ----------------------- core. Model 1: a standard U-Net
###Code
class double_conv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
stride=stride, padding=padding),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size,
stride=stride, padding=padding),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True))
def forward(self, x):
x = self.conv(x)
return x
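
# Shape note (added illustration): with kernel 3, stride 1, padding 1, double_conv
# preserves spatial size, e.g. double_conv(3, 16): [N, 3, H, W] -> [N, 16, H, W]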
start_fm = 16 # base number of feature maps; changing it scales the channel width of every layer
class Unet(nn.Module):
def __init__(self):
super(Unet, self).__init__()
# Input 128x128x1
#Contracting Path
#(Double) Convolution 1
self.double_conv1 = double_conv(3, start_fm, 3, 1, 1)
#Max Pooling 1
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
#Convolution 2
self.double_conv2 = double_conv(start_fm, start_fm * 2, 3, 1, 1)
#Max Pooling 2
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
#Convolution 3
self.double_conv3 = double_conv(start_fm * 2, start_fm * 4, 3, 1, 1)
#Max Pooling 3
self.maxpool3 = nn.MaxPool2d(kernel_size=2)
#Convolution 4
self.double_conv4 = double_conv(start_fm * 4, start_fm * 8, 3, 1, 1)
#Max Pooling 4
self.maxpool4 = nn.MaxPool2d(kernel_size=2)
#Convolution 5
self.double_conv5 = double_conv(start_fm * 8, start_fm * 16, 3, 1, 1)
#Transposed Convolution 4
self.t_conv4 = nn.ConvTranspose2d(start_fm * 16, start_fm * 8, 2, 2)
# Expanding Path Convolution 4
self.ex_double_conv4 = double_conv(start_fm * 16, start_fm * 8, 3, 1, 1)
#Transposed Convolution 3
self.t_conv3 = nn.ConvTranspose2d(start_fm * 8, start_fm * 4, 2, 2)
#Convolution 3
self.ex_double_conv3 = double_conv(start_fm * 8, start_fm * 4, 3, 1, 1)
#Transposed Convolution 2
self.t_conv2 = nn.ConvTranspose2d(start_fm * 4, start_fm * 2, 2, 2)
#Convolution 2
self.ex_double_conv2 = double_conv(start_fm * 4, start_fm * 2, 3, 1, 1)
#Transposed Convolution 1
self.t_conv1 = nn.ConvTranspose2d(start_fm * 2, start_fm, 2, 2)
#Convolution 1
self.ex_double_conv1 = double_conv(start_fm * 2, start_fm, 3, 1, 1)
# One by One Conv
self.one_by_one = nn.Conv2d(start_fm, 1, 1, 1, 0)
#self.final_act = nn.Sigmoid()
def forward(self, inputs):
# Contracting Path
conv1 = self.double_conv1(inputs)
maxpool1 = self.maxpool1(conv1)
conv2 = self.double_conv2(maxpool1)
maxpool2 = self.maxpool2(conv2)
conv3 = self.double_conv3(maxpool2)
maxpool3 = self.maxpool3(conv3)
conv4 = self.double_conv4(maxpool3)
maxpool4 = self.maxpool4(conv4)
# Bottom
conv5 = self.double_conv5(maxpool4)
# Expanding Path
t_conv4 = self.t_conv4(conv5)
cat4 = torch.cat([conv4 ,t_conv4], 1)
ex_conv4 = self.ex_double_conv4(cat4)
t_conv3 = self.t_conv3(ex_conv4)
cat3 = torch.cat([conv3 ,t_conv3], 1)
ex_conv3 = self.ex_double_conv3(cat3)
t_conv2 = self.t_conv2(ex_conv3)
cat2 = torch.cat([conv2 ,t_conv2], 1)
ex_conv2 = self.ex_double_conv2(cat2)
t_conv1 = self.t_conv1(ex_conv2)
cat1 = torch.cat([conv1 ,t_conv1], 1)
ex_conv1 = self.ex_double_conv1(cat1)
one_by_one = self.one_by_one(ex_conv1)
return one_by_one
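
# Smoke test (a minimal sketch, CPU-only): a 128x128 RGB batch should map to a
# 1-channel mask with the same spatial size.
_net = Unet()
_x = torch.randn(2, 3, 128, 128)
assert _net(_x).shape == (2, 1, 128, 128)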
###Output
_____no_output_____
###Markdown
Model 2: a U-Net variant that supports pretrained weights
###Code
# #############################################
# ## Network architectures and a PyTorch fine-tuning recipe ##
# #############################################
# from torch import nn
# from torch.nn import functional as F
# import torch
# from torchvision import models
# import torchvision
# def conv3x3(in_, out):
# return nn.Conv2d(in_, out, 3, padding=1)
# class ConvRelu(nn.Module):
# def __init__(self, in_, out):
# super().__init__()
# self.conv = conv3x3(in_, out)
# self.activation = nn.ReLU(inplace=True)
# def forward(self, x):
# x = self.conv(x)
# x = self.activation(x)
# return x
# class DecoderBlock(nn.Module):
# def __init__(self, in_channels, middle_channels, out_channels):
# super().__init__()
# self.block = nn.Sequential(
# ConvRelu(in_channels, middle_channels),
# nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=3, stride=2, padding=1, output_padding=1),
# nn.ReLU(inplace=True)
# )
# def forward(self, x):
# return self.block(x)
# class UNet11(nn.Module):
# def __init__(self, num_filters=32, pretrained=False):
# """
# :param num_classes:
# :param num_filters:
# :param pretrained:
# False - no pre-trained network is used
# True - encoder is pre-trained with VGG11
# """
# super().__init__()
# self.pool = nn.MaxPool2d(2, 2)
# self.encoder = models.vgg11(pretrained=pretrained).features
# self.relu = self.encoder[1]
# #print(self.relu) # output ==> RELU(inplace)
# self.conv1 = self.encoder[0]
# self.conv2 = self.encoder[3]
# self.conv3s = self.encoder[6]
# self.conv3 = self.encoder[8]
# self.conv4s = self.encoder[11]
# self.conv4 = self.encoder[13]
# self.conv5s = self.encoder[16]
# self.conv5 = self.encoder[18]
# self.center = DecoderBlock(num_filters * 8 * 2, num_filters * 8 * 2, num_filters * 8)
# self.dec5 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 8)
# self.dec4 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 4)
# self.dec3 = DecoderBlock(num_filters * (8 + 4), num_filters * 4 * 2, num_filters * 2)
# self.dec2 = DecoderBlock(num_filters * (4 + 2), num_filters * 2 * 2, num_filters)
# self.dec1 = ConvRelu(num_filters * (2 + 1), num_filters)
#         self.final = nn.Conv2d(num_filters, 1, kernel_size=1) # 1x1 convolution, single-channel output
# def forward(self, x):
# conv1 = self.relu(self.conv1(x))
# conv2 = self.relu(self.conv2(self.pool(conv1)))
# conv3s = self.relu(self.conv3s(self.pool(conv2)))
# conv3 = self.relu(self.conv3(conv3s))
# conv4s = self.relu(self.conv4s(self.pool(conv3)))
# conv4 = self.relu(self.conv4(conv4s))
# conv5s = self.relu(self.conv5s(self.pool(conv4)))
# conv5 = self.relu(self.conv5(conv5s))
# center = self.center(self.pool(conv5))
# dec5 = self.dec5(torch.cat([center, conv5], 1))
# dec4 = self.dec4(torch.cat([dec5, conv4], 1))
# dec3 = self.dec3(torch.cat([dec4, conv3], 1))
# dec2 = self.dec2(torch.cat([dec3, conv2], 1))
# dec1 = self.dec1(torch.cat([dec2, conv1], 1))
# return self.final(dec1)
# def unet11(pretrained=False, **kwargs):
# """
# pretrained:
# False - no pre-trained network is used
# True - encoder is pre-trained with VGG11
# carvana - all weights are pre-trained on
# Kaggle: Carvana dataset https://www.kaggle.com/c/carvana-image-masking-challenge
# """
# if pretrained== True:
# model = UNet11(pretrained=pretrained, **kwargs)
# else:
# model= UNet11(pretrained=False, **kwargs)
#     # if pretrained == 'carvana', load the full set of weights trained on the Carvana dataset
# if pretrained == 'carvana':
# state = torch.load('TernausNet.pt',map_location={'cuda:0': 'cpu'}) #
# model.load_state_dict(state['model']) #,map_location='cpu'
# return model
# class DecoderBlockV2(nn.Module):
# def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True):
# super(DecoderBlockV2, self).__init__()
# self.in_channels = in_channels
# if is_deconv:
# """
#             Parameters for Deconvolution were chosen to avoid artifacts, following
# link https://distill.pub/2016/deconv-checkerboard/
# """
# self.block = nn.Sequential(
# ConvRelu(in_channels, middle_channels),
# nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=4, stride=2,
# padding=1),
# nn.ReLU(inplace=True)
# )
# else:
# self.block = nn.Sequential(
# nn.Upsample(scale_factor=2, mode='bilinear'),
# ConvRelu(in_channels, middle_channels),
# ConvRelu(middle_channels, out_channels),
# )
# def forward(self, x):
# return self.block(x)
# class AlbuNet(nn.Module):
# """
# UNet (https://arxiv.org/abs/1505.04597) with Resnet34(https://arxiv.org/abs/1512.03385) encoder
# Proposed by Alexander Buslaev: https://www.linkedin.com/in/al-buslaev/
# """
# def __init__(self, num_classes=1, num_filters=32, pretrained=False, is_deconv=False):
# """
# :param num_classes:
# :param num_filters:
# :param pretrained:
# False - no pre-trained network is used
# True - encoder is pre-trained with resnet34
# :is_deconv:
# False: bilinear interpolation is used in decoder
# True: deconvolution is used in decoder
# """
# super().__init__()
# self.num_classes = num_classes
# self.pool = nn.MaxPool2d(2, 2)
# self.encoder = torchvision.models.resnet34(pretrained=pretrained)
# self.relu = nn.ReLU(inplace=True)
# self.conv1 = nn.Sequential(self.encoder.conv1,
# self.encoder.bn1,
# self.encoder.relu,
# self.pool)
# self.conv2 = self.encoder.layer1
# self.conv3 = self.encoder.layer2
# self.conv4 = self.encoder.layer3
# self.conv5 = self.encoder.layer4
# self.center = DecoderBlockV2(512, num_filters * 8 * 2, num_filters * 8, is_deconv)
# self.dec5 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
# self.dec4 = DecoderBlockV2(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
# self.dec3 = DecoderBlockV2(128 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)
# self.dec2 = DecoderBlockV2(64 + num_filters * 2, num_filters * 2 * 2, num_filters * 2 * 2, is_deconv)
# self.dec1 = DecoderBlockV2(num_filters * 2 * 2, num_filters * 2 * 2, num_filters, is_deconv)
# self.dec0 = ConvRelu(num_filters, num_filters)
# self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
# def forward(self, x):
# conv1 = self.conv1(x)
# conv2 = self.conv2(conv1)
# conv3 = self.conv3(conv2)
# conv4 = self.conv4(conv3)
# conv5 = self.conv5(conv4)
# center = self.center(self.pool(conv5))
# dec5 = self.dec5(torch.cat([center, conv5], 1))
# dec4 = self.dec4(torch.cat([dec5, conv4], 1))
# dec3 = self.dec3(torch.cat([dec4, conv3], 1))
# dec2 = self.dec2(torch.cat([dec3, conv2], 1))
# dec1 = self.dec1(dec2)
# dec0 = self.dec0(dec1)
# if self.num_classes > 1:
# x_out = F.log_softmax(self.final(dec0), dim=1)
# else:
# x_out = self.final(dec0)
# return x_out
# class UNet16(nn.Module):
# def __init__(self, num_classes=1, num_filters=32, pretrained=False, is_deconv=False):
# """
# :param num_classes:
# :param num_filters:
# :param pretrained:
# False - no pre-trained network used
# True - encoder pre-trained with VGG16
# :is_deconv:
# False: bilinear interpolation is used in decoder
# True: deconvolution is used in decoder
# """
# super().__init__()
# self.num_classes = num_classes
# self.pool = nn.MaxPool2d(2, 2)
# self.encoder = torchvision.models.vgg16(pretrained=pretrained).features
# self.relu = nn.ReLU(inplace=True)
# self.conv1 = nn.Sequential(self.encoder[0],
# self.relu,
# self.encoder[2],
# self.relu)
# self.conv2 = nn.Sequential(self.encoder[5],
# self.relu,
# self.encoder[7],
# self.relu)
# self.conv3 = nn.Sequential(self.encoder[10],
# self.relu,
# self.encoder[12],
# self.relu,
# self.encoder[14],
# self.relu)
# self.conv4 = nn.Sequential(self.encoder[17],
# self.relu,
# self.encoder[19],
# self.relu,
# self.encoder[21],
# self.relu)
# self.conv5 = nn.Sequential(self.encoder[24],
# self.relu,
# self.encoder[26],
# self.relu,
# self.encoder[28],
# self.relu)
# self.center = DecoderBlockV2(512, num_filters * 8 * 2, num_filters * 8, is_deconv)
# self.dec5 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
# self.dec4 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
# self.dec3 = DecoderBlockV2(256 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)
# self.dec2 = DecoderBlockV2(128 + num_filters * 2, num_filters * 2 * 2, num_filters, is_deconv)
# self.dec1 = ConvRelu(64 + num_filters, num_filters)
# self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
# def forward(self, x):
# conv1 = self.conv1(x)
# conv2 = self.conv2(self.pool(conv1))
# conv3 = self.conv3(self.pool(conv2))
# conv4 = self.conv4(self.pool(conv3))
# conv5 = self.conv5(self.pool(conv4))
# center = self.center(self.pool(conv5))
# dec5 = self.dec5(torch.cat([center, conv5], 1))
# dec4 = self.dec4(torch.cat([dec5, conv4], 1))
# dec3 = self.dec3(torch.cat([dec4, conv3], 1))
# dec2 = self.dec2(torch.cat([dec3, conv2], 1))
# dec1 = self.dec1(torch.cat([dec2, conv1], 1))
# if self.num_classes > 1:
# x_out = F.log_softmax(self.final(dec1), dim=1)
# else:
# x_out = self.final(dec1)
# return x_out
###Output
_____no_output_____
###Markdown
Loss definition. There is room for more careful design here. We define a BCEWithLogitsLoss since we're comparing pixel by pixel. In addition, we didn't include a final sigmoid activation, as this loss function includes the sigmoid for us.
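As a quick sanity check (a minimal sketch, not part of the training pipeline): `BCEWithLogitsLoss` folds the sigmoid into the loss, so it should match `BCELoss` applied to `torch.sigmoid(logits)` up to floating-point error.
###Code
# hypothetical fake batch, purely to compare the two loss formulations
_logits = torch.randn(2, 1, 4, 4)                      # fake model outputs
_targets = torch.randint(0, 2, (2, 1, 4, 4)).float()   # fake binary masks
print(nn.BCEWithLogitsLoss()(_logits, _targets).item())
print(nn.BCELoss()(torch.sigmoid(_logits), _targets).item())  # (almost) identical
###Output
_____no_output_____
###Markdown
With that confirmed, set up the model, loss, optimizer and schedule: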
###Code
def get_model():
    # NOTE: unet11 comes from the commented-out "Model 2" block above; uncomment
    # that block before using this helper.
    #model = unet11(pretrained='carvana')
    model = unet11(pretrained=True)
    model.eval()
    return model.to(device)
# load the chosen model (GPU if available)
# if torch.cuda.is_available():
# model=get_model().cuda()
# else:
# model=get_model()
model = Unet().cuda()
#optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10,13,14], gamma=0.1)
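
# Note on the schedule (added illustration, assuming scheduler.step() runs once per
# epoch): lr = 0.01 for epochs 0-9, 1e-3 for epochs 10-12, 1e-4 for epoch 13, 1e-5 after.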
#criterion = nn.CrossEntropyLoss()
criterion = nn.BCEWithLogitsLoss() # yes, plain per-pixel BCE is enough for binary segmentation
###Output
_____no_output_____
###Markdown
Start training; ideally keep checkpoints of the best-performing epochs (the checkpointing code is not written yet)
###Code
mean_train_losses = []
mean_val_losses = []
MaxEpochs=15
for epoch in range(MaxEpochs):
train_losses = []
val_losses = []
count=1
for images, masks in train_loader:
count=count+1
if count%100==0:
print("+20%")
images = Variable(images.cuda())
masks = Variable(masks.cuda())
# images = Variable(images)
# masks = Variable(masks)
outputs = model(images)
loss = criterion(outputs, masks)
        train_losses.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
for images, masks in val_loader:
images = Variable(images.cuda())
masks = Variable(masks.cuda())
# images = Variable(images)
# masks = Variable(masks)
outputs = model(images)
loss = criterion(outputs, masks)
        val_losses.append(loss.item())
    scheduler.step()  # advance the MultiStepLR schedule once per epoch
    mean_train_losses.append(np.mean(train_losses))
    mean_val_losses.append(np.mean(val_losses))
# Print Loss
print('Epoch: {}. Train Loss: {}. Val Loss: {}'.format(epoch+1, np.mean(train_losses), np.mean(val_losses)))
print("Train Done!!!")
torch.save(model.state_dict(), 'saved_Gpu_model_state.pt')
print("Save model Done!!!")
###Output
+20%
Epoch: 1. Train Loss: 0.5629849433898926. Val Loss: 0.5188544988632202
+20%
Epoch: 2. Train Loss: 0.5105397701263428. Val Loss: 0.4985957145690918
+20%
Epoch: 3. Train Loss: 0.479385107755661. Val Loss: 0.4538874328136444
+20%
Epoch: 4. Train Loss: 0.4306584298610687. Val Loss: 0.4141905605792999
+20%
Epoch: 5. Train Loss: 0.3976493179798126. Val Loss: 0.40441033244132996
+20%
Epoch: 6. Train Loss: 0.3861732482910156. Val Loss: 0.39106255769729614
+20%
Epoch: 7. Train Loss: 0.37797415256500244. Val Loss: 0.3793518841266632
+20%
Epoch: 8. Train Loss: 0.358248233795166. Val Loss: 0.3644149899482727
+20%
Epoch: 9. Train Loss: 0.35964009165763855. Val Loss: 0.35760703682899475
+20%
Epoch: 10. Train Loss: 0.34439343214035034. Val Loss: 0.3479984700679779
+20%
Epoch: 11. Train Loss: 0.3354228138923645. Val Loss: 0.3454585671424866
+20%
Epoch: 12. Train Loss: 0.32341617345809937. Val Loss: 0.3461719751358032
+20%
Epoch: 13. Train Loss: 0.32726481556892395. Val Loss: 0.3375156819820404
+20%
Epoch: 14. Train Loss: 0.3064272105693817. Val Loss: 0.3604443669319153
+20%
Epoch: 15. Train Loss: 0.3010289669036865. Val Loss: 0.33074522018432617
Train Done!!!
Save model Done!!!
###Markdown
Start time 11:24:10, end time 11:28:00; 15 epochs in roughly 4 minutes.
###Code
state = torch.load('saved_Gpu_model_state.pt',map_location={'cuda:0': 'cpu'}) # reload the checkpoint saved above
model.load_state_dict(state)
###Output
_____no_output_____
###Markdown
Inspect the training curves
###Code
mean_train_losses
mean_val_losses
train_loss_series = pd.Series(mean_train_losses)
val_loss_series = pd.Series(mean_val_losses)
train_loss_series.plot(label="train")
val_loss_series.plot(label="validation")
plt.legend()
###Output
_____no_output_____
###Markdown
Finally we compute our IOU score for various thresholds
###Code
y_pred_true_pairs = []
for images, masks in val_loader:
images = Variable(images.cuda())
# images = Variable(images)
y_preds = model(images)
for i, _ in enumerate(images):
y_pred = y_preds[i]
        y_pred = torch.sigmoid(y_pred) # sigmoid squashes the logits into (0, 1)
y_pred = y_pred.cpu().data.numpy()
y_pred_true_pairs.append((y_pred, masks[i].numpy()))
print(len(y_pred_true_pairs))
print(y_pred_true_pairs[0][0].shape)
y_pred_true_pairs[0][0]
###Output
800
(1, 128, 128)
###Markdown
We use a method to calculate the IOU score as found in this kernel here: https://www.kaggle.com/leighplt/goto-pytorch-fix-for-v0-3.
###Code
# https://www.kaggle.com/leighplt/goto-pytorch-fix-for-v0-3
for threshold in np.linspace(0, 1, 11):
ious = []
for y_pred, mask in y_pred_true_pairs:
prediction = (y_pred > threshold).astype(int)
iou = jaccard_similarity_score(mask.flatten(), prediction.flatten())
ious.append(iou)
    accuracies = [np.mean(np.array(ious) > iou_threshold)
                  for iou_threshold in np.linspace(0.5, 0.95, 10)]
print('Threshold: %.1f, Metric: %.3f' % (threshold, np.mean(accuracies)))
###Output
Threshold: 0.0, Metric: 0.118
Threshold: 0.1, Metric: 0.476
Threshold: 0.2, Metric: 0.609
Threshold: 0.3, Metric: 0.703
Threshold: 0.4, Metric: 0.746
Threshold: 0.5, Metric: 0.766
Threshold: 0.6, Metric: 0.766
Threshold: 0.7, Metric: 0.757
Threshold: 0.8, Metric: 0.735
Threshold: 0.9, Metric: 0.703
Threshold: 1.0, Metric: 0.650
###Markdown
Inference on the test set. Load, predict and submit the test image predictions. Prepare the test data
###Code
#--------------- Get test images and resize them ------------------------------
X_test = np.zeros((len(test_ids), im_height, im_width, im_chan), dtype=np.uint8)
Y_test = np.zeros((len(test_ids), im_height, im_width, 1), dtype=np.bool_)
print('Getting and resizing test images ... ')
sys.stdout.flush()
for n, id_ in tqdm_notebook(enumerate(test_ids), total=len(test_ids)):
img = imread(path_test + '/images/' + id_)
if n==1:
print("raw img shape:",img.shape) # 打印其中的1张图片的原始shape # 由于颜色数组中的RGB上对应的位置都相同 故可以只保存1个通道 压缩保存
x = resize(img, (128, 128, 3), mode='constant', preserve_range=True) # 缩放至128x128x1
X_test[n] = x
print('Done!')
print('X_test:',X_test.shape)
print("Y_test:",Y_test.shape)
# Check if the test data looks all right
ix = random.randint(0, len(test_ids))
plt.imshow(X_test[ix])
plt.show()
X_test_shaped = X_test.copy().reshape(-1, 3, 128, 128)/255 # linear scaling into the (0,1) range
Y_test_shaped = Y_test.copy().reshape(-1, 1, 128, 128)
X_test_shaped = X_test_shaped.astype(np.float32)
Y_test_shaped = Y_test_shaped.astype(np.float32)
X_test_shaped=X_test_shaped.transpose(0,2,3,1)
Y_test_shaped=Y_test_shaped.transpose(0,2,3,1)
print("Before:",X_test_shaped.shape)
X_test_shaped_1=np.zeros((X_test_shaped.shape[0],X_test_shaped.shape[3],X_test_shaped.shape[1],X_test_shaped.shape[2]),dtype=np.float32)
Y_test_shaped_1=np.zeros((Y_test_shaped.shape[0],Y_test_shaped.shape[3],Y_test_shaped.shape[1],Y_test_shaped.shape[2]),dtype=np.float32)
for idx,sample in tqdm_notebook(enumerate(X_test_shaped),total=len(X_test_shaped)):
if idx==0:
        print(img_transform(sample).shape) # the input image is [H,W,C]; the transform returns [C,H,W]
#X_test_shaped_1[idx]=img_transform(sample)
X_test_shaped_1[idx]= sample.transpose(2,0,1)
for idx,sample in tqdm_notebook(enumerate(Y_test_shaped),total=len(Y_test_shaped)):
    Y_test_shaped_1[idx]=sample.transpose(2,0,1) # do not apply standard normalization to the masks
print(X_test_shaped_1.shape)
print(X_test_shaped_1[0])
print(Y_test_shaped_1.shape)
print(Y_test_shaped_1[0])
###Output
_____no_output_____
###Markdown
[How to manually free memory in Python](https://blog.csdn.net/ztf312/article/details/54024765)
###Code
import gc
del X_test_shaped
del Y_test_shaped
del X_test
del Y_test
gc.collect()
###Output
_____no_output_____
###Markdown
Horizontally flip the test set (test-time augmentation)
###Code
X_test_shaped_2 =X_test_shaped_1.copy()
Y_test_shaped_2=Y_test_shaped_1.copy()
print(X_test_shaped_2 is X_test_shaped_1)
print(Y_test_shaped_2 is Y_test_shaped_1)
print(X_test_shaped_2.shape)
print(Y_test_shaped_2.shape)
def Flip(X,Y):
# X_aug = bb[:,::-1,:]
# y_aug = cc[:,::-1,:]
X_aug = X[:,:,:,::-1]
y_aug = Y[:,:,:,::-1]
return X_aug,y_aug
X_test_shaped_2,Y_test_shaped_2=Flip(X_test_shaped_2,Y_test_shaped_2)
X_test_shaped_3=np.concatenate([X_test_shaped_1,X_test_shaped_2],axis=0)
Y_test_shaped_3=np.concatenate([Y_test_shaped_1,Y_test_shaped_2],axis=0)
print(X_test_shaped_3.shape)
print(Y_test_shaped_3.shape)
###Output
(36000, 3, 128, 128)
(36000, 1, 128, 128)
###Markdown
Load the model and run inference on the test set
###Code
salt_ID_dataset_test = saltIDDataset(X_test_shaped_3,
                                      train=True, # keep train=True: the loop below unpacks (image, mask) pairs; the masks are dummy zeros here
preprocessed_masks=Y_test_shaped_3)
batch_size = 64
test_loader = torch.utils.data.DataLoader(dataset=salt_ID_dataset_test,
batch_size=batch_size,
shuffle=False)
salt_ID_dataset_test.__len__()
Mask_preds = []
count=1
for images,_ in test_loader:
    count=count+1
    if count%20==0:
        print("Next 20 test batches ...")
    images = Variable(images.cuda())
    # images = Variable(images)
    y_preds= model(images)
    for i, _ in enumerate(images):
        y_pred = y_preds[i]
        y_pred = torch.sigmoid(y_pred) # sigmoid squashes the logits into (0, 1)
        y_pred = y_pred.cpu().data.numpy()
        Mask_preds.append(y_pred)
# the predictions could also be written to disk via numpy, pandas, etc.
print("Test Done!")
len(Mask_preds)
Mask_preds_part1=Mask_preds[0:18000].copy()
Mask_preds_part2=Mask_preds[18000:].copy()
print(len(Mask_preds_part1))
print(len(Mask_preds_part2))
def Flip_img(Mask): #[bs,c_in,H,W]
out=[]
for i in range(len(Mask)):
out.append( Mask[i][:,:,::-1].copy())
return out
Mask_preds_part2= Flip_img(Mask_preds_part2)
len(Mask_preds)
import gc
del Mask_preds
gc.collect()
Mask_preds=[]
for i in range(len(Mask_preds_part1)):
Mask_preds.append(0.5*(Mask_preds_part1[i]+Mask_preds_part2[i]))
print("平均ok,Mask_preds shape:",len(Mask_preds))
print("Mask_preds shape:",len(Mask_preds))
print(Mask_preds[0].shape)
###Output
Mask_preds shape: 18000
(1, 128, 128)
###Markdown
Persist the predicted Mask_preds so that different threshold-cutoff strategies can be explored later
###Code
import gc
del X_test_shaped_1
del Y_test_shaped_1
gc.collect()
np.save('Mask_preds_128',Mask_preds)
import pickle
f1 = open('test_ids.pkl', 'wb')
pickle.dump(test_ids,f1,0) # protocol 1 or 2 would store the data in binary form
f1.close()
###Output
_____no_output_____
###Markdown
Build submission.csv
###Code
f1 = open('test_ids.pkl', 'rb')
test_ids=pickle.load(f1) # reload
f1.close()
# load the mask predictions
Mask_preds=np.load('Mask_preds_128.npy')
Mask_preds_101=np.zeros((len(Mask_preds),101,101),dtype=np.float)
for idx,sample in tqdm_notebook(enumerate(Mask_preds),total=len(Mask_preds)):
    # convert each 128x128 prediction back to 101x101
Mask_preds_101[idx] = resize(sample, (1,101, 101), mode='constant', preserve_range=True).reshape(101,101)
print("Mask_preds_101 shape:",len(Mask_preds_101))
print(Mask_preds_101[0].shape)
# Source https://www.kaggle.com/bguberfain/unet-with-depth
# run-length-encode a binarized mask matrix
def RunLengthEncode(img, order='F', format=True):
"""
img is binary mask image, shape (r,c)
order is down-then-right, i.e. Fortran
format determines if the order needs to be preformatted (according to submission rules) or not
returns run length as an array or string (if format is True)
"""
bytes = img.reshape(img.shape[0] * img.shape[1], order=order)
runs = [] ## list of run lengths
r = 0 ## the current run length
pos = 1 ## count starts from 1 per WK
for c in bytes:
if (c == 0):
if r != 0:
runs.append((pos, r))
pos += r
r = 0
pos += 1
else:
r += 1
# if last run is unsaved (i.e. data ends with 1)
if r != 0:
runs.append((pos, r))
pos += r
r = 0
if format:
z = ''
for rr in runs:
z += '{} {} '.format(rr[0], rr[1])
return z[:-1]
else:
return runs
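
# Quick sanity check (hypothetical toy mask): the column-major ('F') flatten of
# [[0, 1],
#  [1, 0]] is [0, 1, 1, 0], i.e. a single run starting at pixel 2 with length 2.
print(RunLengthEncode(np.array([[0, 1], [1, 0]])))  # -> "2 2"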
threshold=0.8
pred_dict = {fn[:-4]:RunLengthEncode((Mask_preds_101[i]>threshold).astype(np.bool_)) for i,fn in tqdm_notebook(enumerate(test_ids),total=len(Mask_preds_101))}
sub = pd.DataFrame.from_dict(pred_dict,orient='index')
sub.index.names = ['id']
sub.columns = ['rle_mask']
sub.to_csv('submission.csv')
###Output
_____no_output_____ |
tweetreceive.ipynb | ###Markdown
Next, set up the API and load the model
###Code
import os, re, datetime
import numpy as np
import tensorflow as tf
import tweepy

auth = tweepy.OAuthHandler(os.environ.get('API_KEY'), os.environ.get('API_SECRET'))
auth.set_access_token(os.environ.get('ACCESS_TOKEN'), os.environ.get('ACCESS_SECRET'))
api = tweepy.API(auth)
bert_model_path = "sentiment140_bert"
bert_model = tf.saved_model.load(bert_model_path)
def bert_preprocess(text):
pat1 = r'@[A-Za-z0-9]+'
pat2 = r'https?://[A-Za-z0-9./]+'
combined_pat = r'|'.join((pat1, pat2))
stripped = re.sub(combined_pat, '', text)
try:
clean = stripped.decode("utf-8-sig").replace(u"\ufffd", "?")
except:
clean = stripped
letters_only = re.sub("[^a-zA-Z]", " ", clean)
lower_case = letters_only.lower()
    # the letters_only step above introduced extra whitespace;
    # strip the leading/trailing whitespace before returning
return lower_case.strip()
preprocess = np.vectorize(bert_preprocess)
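
# Example (hypothetical tweet) of what the cleaning produces:
# bert_preprocess("@user check https://t.co/abc Great day!!") -> "check  great day"
# (mentions and URLs removed, non-letters replaced by spaces, lowercased; note the
#  internal double space left where the URL was removed)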
def getTweets(username):
"""
Input: username
    Returns: fraction of the user's tweets from the last 30 days that the model classifies as positive
"""
thirty_earlier = datetime.datetime.utcnow()-datetime.timedelta(30)
tweets = []
for status in tweepy.Cursor(api.user_timeline,id=username).items():
if status.created_at > thirty_earlier:
tweets.append(status.text)
else:
break
preprocessed = preprocess(np.array(tweets))
predictions = tf.sigmoid(bert_model(tf.constant(preprocessed))) > 0.5
return np.mean(predictions)
getTweets('joebiden')
###Output
_____no_output_____ |
examples/ruGPT3XL_generation.ipynb | ###Markdown
Install env
###Code
!export CUDA_HOME=/usr/local/cuda-10.1
!git clone https://github.com/NVIDIA/apex
!pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./apex
!apt-get install llvm-9-dev
!pip install cpufeature
!pip install triton==0.2.3
!DS_BUILD_CPU_ADAM=1 DS_BUILD_SPARSE_ATTN=1 pip install deepspeed==0.3.7
!ds_report
# And this cell should be run without errors
import deepspeed.ops.sparse_attention.sparse_attn_op
!git clone https://github.com/php4nuke/ru-gpts.git
!pip install transformers==3.5.1
!pip install natsort
###Output
_____no_output_____
###Markdown
Load model
###Code
import warnings
warnings.filterwarnings("ignore")
import sys
sys.path.append("ru-gpts/gw")
from generation_wrapper import RuGPT3XL
gpt = RuGPT3XL.from_pretrained("sberbank-ai/rugpt3xl", seq_len=1024)
###Output
_____no_output_____
###Markdown
Simple generation
###Code
def filter_resuls(nr):
    # truncate each string at the end-of-text token; keep it unchanged if the token is absent
    return [x[:x.find("<|endoftext|>")] if "<|endoftext|>" in x else x for x in nr]
import requests
# poll a (third-party) endpoint for a prompt, generate a continuation, and post it back
t_in = requests.post('https://gpt3.000webhostapp.com/gpt.php', data = {'txt':''})
while t_in.text != "":
    t_out = filter_resuls(gpt.generate(
        t_in.text,
        min_length=300,
        max_length=500,
        temperature=0.8,
        no_repeat_ngram_size=3,
        repetition_penalty=2.0,
    ))
    # generate() returns a list of strings; send the first one back and fetch the next prompt
    t_in = requests.post('https://gpt3.000webhostapp.com/gpt.php', data = {'txt':t_out[0]})
###Output
_____no_output_____
###Markdown
Install env Install Apex
###Code
%%writefile setup.sh
export CUDA_HOME=/usr/local/cuda-10.1
git clone https://github.com/NVIDIA/apex
pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./apex
!sh setup.sh
###Output
_____no_output_____
###Markdown
Install triton
###Code
!apt-get install llvm-9-dev
!pip install cpufeature
!pip install triton==0.2.3
###Output
_____no_output_____
###Markdown
Install DeepSpeed
###Code
!DS_BUILD_CPU_ADAM=1 DS_BUILD_SPARSE_ATTN=1 pip install deepspeed==0.3.7
###Output
_____no_output_____
###Markdown
Test installation: we should have the following output
###Code
!ds_report
# And this cell should be run without errors
import deepspeed.ops.sparse_attention.sparse_attn_op
###Output
_____no_output_____
###Markdown
Download repo and install other libs
###Code
!git clone https://github.com/sberbank-ai/ru-gpts
!pip install transformers==3.5.1
!pip install natsort
###Output
_____no_output_____
###Markdown
Test model Load model
###Code
import warnings
warnings.filterwarnings("ignore")
import sys
sys.path.append("ru-gpts/")
import os
os.environ["USE_DEEPSPEED"] = "1"
from src.xl_wrapper import RuGPT3XL
###Output
_____no_output_____
###Markdown
Note! seq_len is the maximum sequence length used during generation. The maximum available seq_len is 2048 (in tokens). Also, inference takes around 10 GB of GPU memory.
###Code
gpt = RuGPT3XL.from_pretrained("sberbank-ai/rugpt3xl", seq_len=512)
###Output
> initializing model parallel with size 1
> initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 3952 and data parallel seed: 1234
Use alternating sparse & dense attention layers
###Markdown
Get logits
###Code
logits = gpt("Кто был президентом США в 2020? ").logits
type(logits), logits.shape
###Output
_____no_output_____
###Markdown
Get loss
###Code
input_ids = [gpt.tokenizer("Кто был президентом США в 2020? ")['input_ids']]
labels = input_ids
import torch
with torch.no_grad():
loss = gpt(input_ids=input_ids, labels=labels).loss
loss
###Output
_____no_output_____
###Markdown
Simple generation
###Code
def filter_resuls(nr):
    # truncate each string at the end-of-text token; keep it unchanged if the token is absent
    return [x[:x.find("<|endoftext|>")] if "<|endoftext|>" in x else x for x in nr]
###Output
_____no_output_____
###Markdown
Greedy decoding
###Code
filter_resuls(gpt.generate(
"Кто был президентом США в 2020? ",
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
sample
###Code
filter_resuls(gpt.generate(
"Кто был президентом США в 2020? ", do_sample=True, num_return_sequences=5,
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
Top_k top_p filtering
###Code
filter_resuls(gpt.generate(
"Александр Сергеевич Пушкин родился в ",
top_k=5,
top_p=0.95,
temperature=1.2,
num_return_sequences=5,
do_sample=True,
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
Beamsearch
###Code
filter_resuls(gpt.generate(
text="Александр Сергеевич Пушкин родился в ",
max_length=50,
num_beams=10,
no_repeat_ngram_size=3,
repetition_penalty=2.,
num_return_sequences=5,
))
###Output
_____no_output_____
###Markdown
Load model
###Code
%load_ext autoreload
%autoreload 2
import warnings
warnings.filterwarnings("ignore")
import sys
sys.path.append("../gw")
from generation_wrapper import RuGPT3XL
###Output
_____no_output_____
###Markdown
Note! seq_len is the maximum sequence length used during generation. The maximum available seq_len is 2048 (in tokens). Also, inference takes around 10 GB of GPU memory.
###Code
gpt = RuGPT3XL.from_pretrained("sberbank-ai/rugpt3xl", seq_len=512)
###Output
> initializing model parallel with size 1
> initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 3952 and data parallel seed: 1234
Use alternating sparse & dense attention layers
###Markdown
Get logits
###Code
logits = gpt("Кто был президентом США в 2020? ").logits
type(logits), logits.shape
###Output
_____no_output_____
###Markdown
Get loss
###Code
input_ids = [gpt.tokenizer("Кто был президентом США в 2020? ")['input_ids']]
labels = input_ids
import torch
with torch.no_grad():
loss = gpt(input_ids=input_ids, labels=labels).loss
loss
###Output
_____no_output_____
###Markdown
Simple generation
###Code
def filter_resuls(nr):
    # truncate each string at the end-of-text token; keep it unchanged if the token is absent
    return [x[:x.find("<|endoftext|>")] if "<|endoftext|>" in x else x for x in nr]
###Output
_____no_output_____
###Markdown
Greedy decoding
###Code
filter_resuls(gpt.generate(
"Кто был президентом США в 2020? ",
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
sample
###Code
filter_resuls(gpt.generate(
"Кто был президентом США в 2020? ", do_sample=True, num_return_sequences=5,
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
Top_k top_p filtering
###Code
filter_resuls(gpt.generate(
"Александр Сергеевич Пушкин родился в ",
top_k=5,
top_p=0.95,
temperature=1.2,
num_return_sequences=5,
do_sample=True,
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
Beamsearch
###Code
filter_resuls(gpt.generate(
text="Александр Сергеевич Пушкин родился в ",
max_length=50,
num_beams=10,
no_repeat_ngram_size=3,
repetition_penalty=2.,
num_return_sequences=5,
))
###Output
_____no_output_____
###Markdown
Install env
###Code
import subprocess
CUDA_version = [s for s in subprocess.check_output(["nvcc", "--version"]).decode("UTF-8").split(", ") if s.startswith("release")][0].split(" ")[-1]
print("CUDA version:", CUDA_version)
if CUDA_version == "10.0":
torch_version_suffix = "+cu100"
elif CUDA_version == "10.1":
torch_version_suffix = "+cu101"
elif CUDA_version == "10.2":
torch_version_suffix = ""
else:
torch_version_suffix = "+cu110"
###Output
CUDA version: 11.0
###Markdown
If the code below doesn't work, check your CUDA version and installation here: https://pytorch.org/get-started/previous-versions/
###Code
!pip install torch==1.7.1{torch_version_suffix} torchvision==0.8.2{torch_version_suffix} torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
###Output
_____no_output_____
###Markdown
Install Apex
###Code
%%writefile setup.sh
git clone https://github.com/NVIDIA/apex
cd apex
pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
!sh setup.sh
###Output
_____no_output_____
###Markdown
Install triton
###Code
!apt-get install llvm-9-dev
!pip install cpufeature
!pip install triton==0.2.3
###Output
_____no_output_____
###Markdown
Install DeepSpeed
###Code
!DS_BUILD_CPU_ADAM=1 DS_BUILD_SPARSE_ATTN=1 pip install deepspeed==0.3.7
###Output
_____no_output_____
###Markdown
Test installation: we should have the following output
###Code
!ds_report
# And this cell should be run without errors
import deepspeed.ops.sparse_attention.sparse_attn_op
###Output
_____no_output_____
###Markdown
Download repo and install other libs
###Code
!git clone https://github.com/sberbank-ai/ru-gpts
!pip install transformers==3.5.1
!pip install natsort
###Output
Requirement already satisfied: natsort in /usr/local/lib/python3.7/dist-packages (5.5.0)
###Markdown
After installing all the packages, we recommend restarting the runtime. Test model Load model
###Code
import warnings
warnings.filterwarnings("ignore")
import sys
sys.path.append("ru-gpts/")
import os
os.environ["USE_DEEPSPEED"] = "1"
from src.xl_wrapper import RuGPT3XL
###Output
_____no_output_____
###Markdown
Note! seq_len is the maximum sequence length used during generation. The maximum available seq_len is 2048 (in tokens). Also, inference takes around 10 GB of GPU memory.
###Code
gpt = RuGPT3XL.from_pretrained("sberbank-ai/rugpt3xl", seq_len=512)
###Output
> initializing model parallel with size 1
> initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 3952 and data parallel seed: 1234
###Markdown
Get logits
###Code
logits = gpt("Кто был президентом США в 2020? ").logits
type(logits), logits.shape
###Output
_____no_output_____
###Markdown
Get loss
###Code
input_ids = [gpt.tokenizer("Кто был президентом США в 2020? ")['input_ids']]
labels = input_ids
import torch
with torch.no_grad():
loss = gpt(input_ids=input_ids, labels=labels).loss
loss
###Output
_____no_output_____
###Markdown
Simple generation
###Code
def filter_resuls(nr):
    # truncate each string at the end-of-text token; keep it unchanged if the token is absent
    return [x[:x.find("<|endoftext|>")] if "<|endoftext|>" in x else x for x in nr]
###Output
_____no_output_____
###Markdown
Greedy decoding
###Code
filter_resuls(gpt.generate(
"Кто был президентом США в 2020? ",
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
sample
###Code
filter_resuls(gpt.generate(
"Кто был президентом США в 2020? ", do_sample=True, num_return_sequences=5,
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
Top_k top_p filtering
###Code
filter_resuls(gpt.generate(
"Александр Сергеевич Пушкин родился в ",
top_k=5,
top_p=0.95,
temperature=1.2,
num_return_sequences=5,
do_sample=True,
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
Beamsearch
###Code
filter_resuls(gpt.generate(
text="Александр Сергеевич Пушкин родился в ",
max_length=50,
num_beams=10,
no_repeat_ngram_size=3,
repetition_penalty=2.,
num_return_sequences=5,
))
###Output
_____no_output_____
###Markdown
Install env Install Apex
###Code
%%writefile setup.sh
export CUDA_HOME=/usr/local/cuda-10.1
git clone https://github.com/NVIDIA/apex
pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./apex
!sh setup.sh
###Output
_____no_output_____
###Markdown
Install triton
###Code
!apt-get install llvm-9-dev
!pip install cpufeature
!pip install triton==0.2.3
###Output
_____no_output_____
###Markdown
Install DeepSpeed
###Code
!DS_BUILD_CPU_ADAM=1 DS_BUILD_SPARSE_ATTN=1 pip install deepspeed==0.3.7
###Output
_____no_output_____
###Markdown
Test installation: we should have the following output
###Code
!ds_report
# And this cell should be run without errors
import deepspeed.ops.sparse_attention.sparse_attn_op
###Output
_____no_output_____
###Markdown
Download repo and install other libs
###Code
!git clone https://github.com/sberbank-ai/ru-gpts.git
!pip install transformers==3.5.1
!pip install natsort
###Output
_____no_output_____
###Markdown
Test model Load model
###Code
import warnings
warnings.filterwarnings("ignore")
import sys
sys.path.append("ru-gpts/gw")
from generation_wrapper import RuGPT3XL
###Output
_____no_output_____
###Markdown
Note! seq_len is the maximum sequence length used during generation. The maximum available seq_len is 2048 (in tokens). Also, inference takes around 10 GB of GPU memory.
###Code
gpt = RuGPT3XL.from_pretrained("sberbank-ai/rugpt3xl", seq_len=512)
###Output
> initializing model parallel with size 1
> initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 3952 and data parallel seed: 1234
Use alternating sparse & dense attention layers
###Markdown
Get logits
###Code
logits = gpt("Кто был президентом США в 2020? ").logits
type(logits), logits.shape
###Output
_____no_output_____
###Markdown
Get loss
###Code
input_ids = [gpt.tokenizer("Кто был президентом США в 2020? ")['input_ids']]
labels = input_ids
import torch
with torch.no_grad():
loss = gpt(input_ids=input_ids, labels=labels).loss
loss
###Output
_____no_output_____
###Markdown
Simple generation
###Code
def filter_resuls(nr):
    # truncate each string at the end-of-text token; keep it unchanged if the token is absent
    return [x[:x.find("<|endoftext|>")] if "<|endoftext|>" in x else x for x in nr]
###Output
_____no_output_____
###Markdown
Greedy decoding
###Code
filter_resuls(gpt.generate(
"Кто был президентом США в 2020? ",
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
sample
###Code
filter_resuls(gpt.generate(
"Кто был президентом США в 2020? ", do_sample=True, num_return_sequences=5,
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
Top_k top_p filtering
###Code
filter_resuls(gpt.generate(
"Александр Сергеевич Пушкин родился в ",
top_k=5,
top_p=0.95,
temperature=1.2,
num_return_sequences=5,
do_sample=True,
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
Beamsearch
###Code
filter_resuls(gpt.generate(
text="Александр Сергеевич Пушкин родился в ",
max_length=50,
num_beams=10,
no_repeat_ngram_size=3,
repetition_penalty=2.,
num_return_sequences=5,
))
###Output
_____no_output_____
###Markdown
Install lib
###Code
%%bash
rm -rf /usr/local/cuda
ln -s /usr/local/cuda-10.1 /usr/local/cuda
!nvcc --version
!stat /usr/local/cuda
!pip uninstall -y triton
!pip uninstall -y torch
%%bash
export LD_LIBRARY_PATH=/usr/lib/
!apt-get install clang-9 llvm-9 llvm-9-dev llvm-9-tools
!pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
%%writefile setup.sh
git clone https://github.com/NVIDIA/apex
cd apex
pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
!sh setup.sh
!pip install triton==0.2.3
# !pip uninstall -y typing
!pip install cpufeature
!DS_BUILD_CPU_ADAM=1 DS_BUILD_SPARSE_ATTN=1 pip install deepspeed==0.3.7
!ds_report
import deepspeed.ops.sparse_attention.sparse_attn_op
!rm -rf ru-gpts
!git clone https://github.com/sberbank-ai/ru-gpts
!pip install transformers==3.5.1
!cp ru-gpts/src_utils/trainer_pt_utils.py /usr/local/lib/python3.7/dist-packages/transformers/trainer_pt_utils.py
###Output
_____no_output_____
###Markdown
Test model Load model
###Code
import warnings
warnings.filterwarnings("ignore")
import sys
sys.path.append("ru-gpts/")
import os
os.environ["USE_DEEPSPEED"] = "1"
from src.xl_wrapper import RuGPT3XL
###Output
_____no_output_____
###Markdown
Note! seq_len is the maximum sequence length used during generation. The maximum available seq_len is 2048 (in tokens). Also, inference takes around 10 GB of GPU memory.
###Code
# gpt = RuGPT3XL.from_pretrained("sberbank-ai/rugpt3xl", seq_len=512)
gpt = RuGPT3XL.from_pretrained("sberbank-ai/rugpt3xl", seq_len=512)
# "model parallel group is not initialized" is raised when no GPU is attached
# gpt.model = gpt.model.float()
###Output
_____no_output_____
###Markdown
Get logits
###Code
logits = gpt("Кто был президентом США в 2020? ").logits
type(logits), logits.shape
###Output
_____no_output_____
###Markdown
Get loss
###Code
input_ids = [gpt.tokenizer("Кто был президентом США в 2020? ")['input_ids']]
labels = input_ids
import torch
with torch.no_grad():
loss = gpt(input_ids=input_ids, labels=labels).loss
loss
###Output
_____no_output_____
###Markdown
Simple generation
###Code
def filter_resuls(nr):
    # truncate each string at the end-of-text token; keep it unchanged if the token is absent
    return [x[:x.find("<|endoftext|>")] if "<|endoftext|>" in x else x for x in nr]
###Output
_____no_output_____
###Markdown
Greedy decoding
###Code
filter_resuls(gpt.generate(
"Кто был президентом США в 2020? ",
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
sample
###Code
filter_resuls(gpt.generate(
"Кто был президентом США в 2020? ", do_sample=True, num_return_sequences=5,
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
Top_k top_p filtering
###Code
filter_resuls(gpt.generate(
"Александр Сергеевич Пушкин родился в ",
top_k=5,
top_p=0.95,
temperature=1.2,
num_return_sequences=5,
do_sample=True,
max_length=50,
no_repeat_ngram_size=3,
repetition_penalty=2.,
))
###Output
_____no_output_____
###Markdown
Beamsearch
###Code
filter_resuls(gpt.generate(
text="Александр Сергеевич Пушкин родился в ",
max_length=50,
num_beams=10,
no_repeat_ngram_size=3,
repetition_penalty=2.,
num_return_sequences=5,
))
###Output
_____no_output_____ |
PaddleOCR_.ipynb | ###Markdown
###Code
!python -m pip install paddlepaddle-gpu==2.0.0 -i https://mirror.baidu.com/pypi/simple
!pip install paddleocr
!git clone https://github.com/PaddlePaddle/PaddleOCR
from paddleocr import PaddleOCR, draw_ocr # main OCR dependencies
from matplotlib import pyplot as plt # plot images
import cv2 #opencv
import os # folder directory navigation
ocr_model = PaddleOCR(lang='en')
img_path = '/content/ocr.jpg'  # absolute path; os.path.join('.', ...) would discard the '.' anyway
result = ocr_model.ocr(img_path)
result
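
# Each detection in `result` is [box, (text, confidence)]: box holds the four
# corner points of the text region, text is the recognized string, and
# confidence is a float in [0, 1] (see how they are unpacked below).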
for res in result:
print(res[1][0])
# Extracting detected components
boxes = [res[0] for res in result]
texts = [res[1][0] for res in result]
scores = [res[1][1] for res in result]
font_path = os.path.join('PaddleOCR', 'doc', 'fonts', 'latin.ttf')
img = cv2.imread(img_path)
# reorders the color channels
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# resizing display area
plt.figure(figsize=(15,15))
# draw annotations on image
annotated = draw_ocr(img, boxes, texts, scores, font_path=font_path)
# show the image using matplotlib
plt.imshow(annotated)
img.shape
###Output
_____no_output_____ |
16_reinforcement_learning.ipynb | ###Markdown
**Chapter 16 – Reinforcement Learning** This notebook contains all the sample code and solutions to the exercises in chapter 16. Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure matplotlib plots figures inline and prepare a function to save the figures:
###Code
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
import sys
import tensorflow as tf  # used by reset_graph() below
# to make this notebook's output stable across runs
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# To plot pretty figures and animations
%matplotlib nbagg
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
    os.makedirs(os.path.dirname(path), exist_ok=True)  # make sure the output folder exists
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
Note: there may be minor differences between the output of this notebook and the examples shown in the book. You can safely ignore these differences. They are mainly due to the fact that most of the environments provided by OpenAI gym have some randomness. Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Next we will load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
_____no_output_____
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [height, width, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,4))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little helper function to plot an environment:
###Code
def plot_environment(env, figsize=(5,4)):
plt.close() # or else nbagg sometimes plots in the previous cell
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represent the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower left for 40 steps:
###Code
env.reset()
for step in range(110):
env.step(3) #left
for step in range(40):
env.step(8) #lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), by moving in random directions for 10 steps at a time, recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now show the animation (it's a bit jittery within Jupyter):
###Code
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
plt.close() # or else nbagg sometimes plots in the previous cell
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch), frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To code our first learning agent, we will be using a simpler environment: the Cart-Pole. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment... unfortunately we need to fix an annoying rendering issue first. Fixing the rendering issue Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify the `rgb_array` mode. In general you can safely ignore that window. However, if Jupyter is running on a headless server (i.e., without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb. You can start Jupyter using the `xvfb-run` command: $ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook. If Jupyter is running on a headless server but you don't want to worry about Xvfb, then you can just use the following rendering function for the Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
from pyglet.gl import gl_info
openai_cart_pole_rendering = True # no problem, let's use OpenAI gym's rendering function
except Exception:
openai_cart_pole_rendering = False # probably no X server available, let's use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
# use OpenAI gym's rendering function
return env.render(mode="rgb_array")
else:
# rendering for the cart pole environment (in case OpenAI gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
cart_col = 0x000000 # Blue Green Red
pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
plt.show()
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
img.shape
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to the right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like it's doing what we're telling it to do. Now how can we make the pole remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! Neural Network Policies Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`. Note: instead of using the `fully_connected()` function from the `tensorflow.contrib.layers` module (as in the book), we now use the `dense()` function from the `tf.layers` module, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same.The main differences relevant to this chapter are:* the `_fn` suffix was removed in all the parameters that had it (for example the `activation_fn` parameter was renamed to `activation`).* the `weights` parameter was renamed to `kernel`,* the default activation is `None` instead of `tf.nn.relu`
###Code
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more than this
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.contrib.layers.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
###Output
/opt/conda/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performed:
###Code
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`):
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames)
plt.show()
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
INFO:tensorflow:Restoring parameters from ./my_policy_net_basic.ckpt
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_.The _Policy Gradients_ algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
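To make the credit assignment concrete: the `discount_rewards()` function defined below scores each action by summing the rewards that follow it, discounted at each step. With rewards [10, 0, -50] and a discount rate of 0.8, the last action scores -50, the middle one 0 + 0.8 × (-50) = -40, and the first one 10 + 0.8 × (-40) = -22, so even the step that collected +10 is blamed for the bad outcome that followed. These scores are then normalized across all games (`discount_and_normalize_rewards()`) before they are used to weight the gradients.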
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 25
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./ckpts/my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames)
plt.show()
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
INFO:tensorflow:Restoring parameters from ./my_policy_net_pg.ckpt
###Markdown
Markov Chains
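A Markov chain is defined entirely by its transition probabilities: the probability of moving from each state to each other state, independent of how the current state was reached. In the 4-state chain below, s3 is an absorbing terminal state (it transitions to itself with probability 1.0), so each sampled sequence either reaches s3 and stops, or keeps cycling (mostly between s1 and s2) until we give up after 50 steps.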
###Code
transition_probabilities = [
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to ...
[0.0, 1.0, 0.0, 0.0], # from s2 to ...
[0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = np.random.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process
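A Markov decision process extends a Markov chain with actions and rewards: at each step the agent picks one of the actions available in the current state, and that choice determines both the transition probabilities and the reward collected. The code below defines a small 3-state MDP and compares three hand-made policies: `policy_fire` chases the big rewards, `policy_random` picks any legal action, and `policy_safe` avoids the risky action in s1.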
###Code
transition_probabilities = [
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return np.random.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = np.random.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate Q-Value estimates (or close enough), then the optimal policy consists in choosing the action that has the highest Q-Value (i.e., the greedy policy).
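Concretely, the learning step implemented in the code below nudges each estimate towards the reward just observed plus the discounted value of the best possible next action: $Q(s, a) \leftarrow (1 - \alpha) \cdot Q(s, a) + \alpha \cdot \left(r + \gamma \cdot \underset{a'}{\max} \, Q(s', a')\right)$, where $\alpha$ is the learning rate and $\gamma$ is the discount rate.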
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
q_values[state][actions]=0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to Play MsPacman Using the DQN Algorithm **Warning**: Unfortunately, the first version of the book contained two important errors in this section.1. The actor DQN and critic DQN should have been named _online DQN_ and _target DQN_ respectively. Actor-critic algorithms are a distinct class of algorithms.2. The online DQN is the one that learns and is copied to the target DQN at regular intervals. The target DQN's only role is to estimate the next state's Q-Values for each possible action. This is needed to compute the target Q-Values for training the online DQN, as shown in this equation:$y(s,a) = r + \gamma \cdot \underset{a'}{\max} \, Q_\text{target}(s', a')$* $y(s,a)$ is the target Q-Value to train the online DQN for the state-action pair $(s, a)$.* $r$ is the reward actually collected after playing action $a$ in state $s$.* $\gamma$ is the discount rate.* $s'$ is the state actually reached after playing action $a$ in state $s$.* $a'$ is one of the possible actions in state $s'$.* $Q_\text{target}(s', a')$ is the target DQN's estimate of the Q-Value of playing action $a'$ while in state $s'$. For example, if the agent collected a reward of 10 and the target DQN's best estimate for the next state is 50, then with $\gamma = 0.99$ the training target is $y(s,a) = 10 + 0.99 \times 50 = 59.5$. I hope these errors did not affect you, and if they did, I sincerely apologize. Creating the MsPacman environment
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
mspacman_color = 210 + 164 + 74
def preprocess_observation(obs):
img = obs[1:176:2, ::2] # crop and downsize
img = img.sum(axis=2) # to greyscale
img[img==mspacman_color] = 0 # Improve contrast
img = (img // 3 - 128).astype(np.int8) # normalize from -128 to 127
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
img.shape
###Output
_____no_output_____
###Markdown
Note: the `preprocess_observation()` function is slightly different from the one in the book: instead of representing pixels as 64-bit floats from -1.0 to 1.0, it represents them as signed bytes (from -128 to 127). The benefit is that the replay memory will take up roughly 8 times less RAM (about 6.5 GB instead of 52 GB). The reduced precision has no visible impact on training.
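The arithmetic behind that figure: each preprocessed frame is 88 × 80 = 7,040 signed bytes, and each memory stores two frames (the state and the next state, plus a few small scalars), so 500,000 memories take roughly 500,000 × 2 × 7,040 bytes ≈ 6.6 GiB, eight times less than with 64-bit floats.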
###Code
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Build DQN Note: instead of using `tf.contrib.layers.convolution2d()` or `tf.contrib.layers.conv2d()` (as in the first version of the book), we now use the `tf.layers.conv2d()` function, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same, except that the parameter names have changed slightly:* the `num_outputs` parameter was renamed to `filters`,* the `stride` parameter was renamed to `strides`,* the `_fn` suffix was removed from parameter names that had it (e.g., `activation_fn` was renamed to `activation`),* the `weights_initializer` parameter was renamed to `kernel_initializer`,* the weights variable was renamed to `"kernel"` (instead of `"weights"`), and the biases variable was renamed from `"biases"` to `"bias"`,* and the default `activation` is now `None` instead of `tf.nn.relu`.
###Code
reset_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden_in = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n # 9 discrete actions are available
initializer = tf.contrib.layers.variance_scaling_initializer()
def q_network(X_state, name):
prev_layer = X_state / 128.0 # scale pixel intensities to the [-1.0, 1.0] range.
with tf.variable_scope(name) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(
conv_n_maps, conv_kernel_sizes, conv_strides,
conv_paddings, conv_activation):
prev_layer = tf.layers.conv2d(
prev_layer, filters=n_maps, kernel_size=kernel_size,
strides=strides, padding=padding, activation=activation,
kernel_initializer=initializer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in])
hidden = tf.layers.dense(last_conv_layer_flat, n_hidden,
activation=hidden_activation,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width,
input_channels])
online_q_values, online_vars = q_network(X_state, name="q_networks/online")
target_q_values, target_vars = q_network(X_state, name="q_networks/target")
copy_ops = [target_var.assign(online_vars[var_name])
for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
online_vars
learning_rate = 0.001
momentum = 0.95
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keep_dims=True)
error = tf.abs(y - q_value)
clipped_error = tf.clip_by_value(error, 0.0, 1.0)
linear_error = 2 * (error - clipped_error)
loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Note: in the first version of the book, the loss function was simply the squared error between the target Q-Values (`y`) and the estimated Q-Values (`q_value`). However, because the experiences are very noisy, it is better to use a quadratic loss only for small errors (below 1.0) and a linear loss (twice the absolute error) for larger errors, which is what the code above computes (see the short sketch after this note). This way large errors don't push the model parameters around as much. Note that we also tweaked some hyperparameters (using a smaller learning rate, and using Nesterov Accelerated Gradients rather than Adam optimization, since adaptive gradient algorithms may sometimes generalize poorly, according to this [paper](https://arxiv.org/abs/1705.08292)). We also tweaked a few other hyperparameters below (a larger replay memory, longer decay for the $\epsilon$-greedy policy, larger discount rate, less frequent copies of the online DQN to the target DQN, etc.). We use this `ReplayMemory` class instead of a `deque` because it is much faster for random access (thanks to @NileshPS who contributed it). Moreover, we default to sampling with replacement, which is much faster than sampling without replacement for large replay memories.
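To see the shape of this loss, here is a minimal NumPy sketch that mirrors the TensorFlow ops above (the name `dqn_loss` is just for illustration):

```python
import numpy as np

def dqn_loss(error):
    # quadratic for |error| <= 1.0, linear with slope 2 beyond that,
    # so the two pieces join smoothly at |error| == 1.0
    clipped = np.clip(np.abs(error), 0.0, 1.0)
    return clipped ** 2 + 2 * (np.abs(error) - clipped)

dqn_loss(np.array([0.5, 1.0, 3.0]))  # => array([0.25, 1., 5.])
```

Beyond an error of 1.0 the gradient magnitude stays constant, so a few very noisy experiences cannot push the parameters around the way a purely squared loss would.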
###Code
class ReplayMemory:
def __init__(self, maxlen):
self.maxlen = maxlen
self.buf = np.empty(shape=maxlen, dtype=np.object)
self.index = 0
self.length = 0
def append(self, data):
self.buf[self.index] = data
self.length = min(self.length + 1, self.maxlen)
self.index = (self.index + 1) % self.maxlen
def sample(self, batch_size, with_replacement=True):
if with_replacement:
indices = np.random.randint(self.length, size=batch_size) # faster
else:
indices = np.random.permutation(self.length)[:batch_size]
return self.buf[indices]
replay_memory_size = 500000
replay_memory = ReplayMemory(replay_memory_size)
def sample_memories(batch_size):
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for memory in replay_memory.sample(batch_size):
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 2000000
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if np.random.rand() < epsilon:
return np.random.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
n_steps = 4000000 # total number of training steps
training_start = 10000 # start training after 10,000 game iterations
training_interval = 4 # run a training step every 4 game iterations
save_steps = 1000 # save the model every 1,000 training steps
copy_steps = 10000 # copy online DQN to target DQN every 10,000 training steps
discount_rate = 0.99
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "ckpts/my_dqn.ckpt"
done = True # env needs to be reset
###Output
_____no_output_____
###Markdown
A few variables for tracking progress:
###Code
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
###Output
_____no_output_____
###Markdown
And now the main training loop!
###Code
with tf.Session() as sess:
if os.path.isfile(checkpoint_path + ".index"):
saver.restore(sess, checkpoint_path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print("\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}\tMean Max-Q {:5f} ".format(
iteration, step, n_steps, step * 100 / n_steps, loss_val, mean_max_q), end="")
if done: # game over, start again
obs = env.reset()
for skip in range(skip_start): # skip the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# Online DQN plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
# Compute statistics for tracking progress (not shown in the book)
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
if iteration < training_start or iteration % training_interval != 0:
continue # only train after warmup period and at regular intervals
# Sample memories and use the target DQN to produce the target Q-Value
X_state_val, X_action_val, rewards, X_next_state_val, continues = (
sample_memories(batch_size))
next_q_values = target_q_values.eval(
feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
# Train the online DQN
_, loss_val = sess.run([training_op, loss], feed_dict={
X_state: X_state_val, X_action: X_action_val, y: y_val})
# Regularly copy the online DQN to the target DQN
if step % copy_steps == 0:
copy_online_to_target.run()
# And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
Iteration 26000 Training step 4000/4000000 (0.1)% Loss 0.006427 Mean Max-Q 0.764066
###Markdown
You can interrupt the cell above at any time to test your agent using the cell below. You can then run the cell above once again; it will load the last parameters saved and resume training.
###Code
frames = []
n_max_steps = 10000
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
obs = env.reset()
for step in range(n_max_steps):
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = np.argmax(q_values)
# Online DQN plays
obs, reward, done, info = env.step(action)
img = env.render(mode="rgb_array")
frames.append(img)
if done:
break
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Extra material Preprocessing for Breakout Here is a preprocessing function you can use to train a DQN for the Breakout-v0 Atari game:
###Code
def preprocess_observation(obs):
img = obs[34:194:2, ::2] # crop and downsize
return np.mean(img, axis=2).reshape(80, 80) / 255.0
env = gym.make("Breakout-v0")
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (80×80 grayscale)")
plt.imshow(img, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, a single image does not give you the direction and speed of the ball, which is crucial information for playing this game. For this reason, it is best to actually combine several consecutive observations to create the environment's state representation. One way to do that is to create a multi-channel image, with one channel per recent observation. Another is to merge all recent observations into a single-channel image, using `np.max()`. In this case, we need to dim the older images so that the DQN can distinguish the past from the present.
###Code
from collections import deque
def combine_observations_multichannel(preprocessed_observations):
return np.array(preprocessed_observations).transpose([1, 2, 0])
def combine_observations_singlechannel(preprocessed_observations, dim_factor=0.5):
dimmed_observations = [obs * dim_factor**index
for index, obs in enumerate(reversed(preprocessed_observations))]
return np.max(np.array(dimmed_observations), axis=0)
n_observations_per_state = 3
preprocessed_observations = deque([], maxlen=n_observations_per_state)
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
preprocessed_observations.append(preprocess_observation(obs))
img1 = combine_observations_multichannel(preprocessed_observations)
img2 = combine_observations_singlechannel(preprocessed_observations)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Multichannel state")
plt.imshow(img1, interpolation="nearest")
plt.axis("off")
plt.subplot(122)
plt.title("Singlechannel state")
plt.imshow(img2, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Exercise solutions 1. to 7. See Appendix A. 8. BipedalWalker-v2 Exercise: _Use policy gradients to tackle OpenAI gym's "BipedalWalker-v2"._
###Code
import gym
env = gym.make("BipedalWalker-v2")
###Output
_____no_output_____
###Markdown
Note: if you run into [this issue](https://github.com/openai/gym/issues/100) ("`module 'Box2D._Box2D' has no attribute 'RAND_LIMIT'`") when making the `BipedalWalker-v2` environment, then try this workaround:

```
$ pip uninstall Box2D-kengz
$ pip install git+https://github.com/pybox2d/pybox2d
```
###Code
obs = env.reset()
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
obs
###Output
_____no_output_____
###Markdown
You can find the meaning of each of these 24 numbers in the [documentation](https://github.com/openai/gym/wiki/BipedalWalker-v2).
###Code
env.action_space
env.action_space.low
env.action_space.high
###Output
_____no_output_____
###Markdown
This is a 4D continuous action space controlling each leg's hip torque and knee torque (from -1 to 1). To deal with a continuous action space, one method is to discretize it. For example, let's limit the possible torque values to these 3 values: -1.0, 0.0, and 1.0. This means that we are left with $3^4=81$ possible actions.
###Code
from itertools import product
possible_torques = np.array([-1.0, 0.0, 1.0])
possible_actions = np.array(list(product(possible_torques, possible_torques, possible_torques, possible_torques)))
possible_actions.shape
tf.reset_default_graph()
# 1. Specify the network architecture
n_inputs = env.observation_space.shape[0] # == 24
n_hidden = 10
n_outputs = len(possible_actions) # == 81
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.selu,
kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
outputs = tf.nn.softmax(logits)
# 3. Select a random action based on the estimated probabilities
action_index = tf.squeeze(tf.multinomial(logits, num_samples=1), axis=-1)
# 4. Training
learning_rate = 0.01
y = tf.one_hot(action_index, depth=len(possible_actions))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Let's try running this policy network, although it is not trained yet.
###Code
def run_bipedal_walker(model_path=None, n_max_steps = 1000):
env = gym.make("BipedalWalker-v2")
frames = []
with tf.Session() as sess:
if model_path is None:
init.run()
else:
saver.restore(sess, model_path)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_index_val = action_index.eval(feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
if done:
break
env.close()
return frames
frames = run_bipedal_walker()
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Nope, it really can't walk. So let's train it!
###Code
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 1000
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}/{}".format(iteration + 1, n_iterations), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_index_val, gradients_val = sess.run([action_index, gradients],
feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_bipedal_walker_pg.ckpt")
frames = run_bipedal_walker("./my_bipedal_walker_pg.ckpt")
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
**Chapter 16 – Reinforcement Learning** _This notebook contains all the sample code and solutions to the exercises in chapter 16._ Setup First, let's make sure this notebook works in both Python 2 and 3, import a few common modules, configure Matplotlib to display figures inline in the notebook, and prepare a function to save the figures:
###Code
# Support both Python 2 and Python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
import sys
# Seed the pseudo-random number generators for consistent output
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# Matplotlib setup
from IPython.display import HTML
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Korean font for plot text
plt.rcParams['font.family'] = 'NanumBarunGothic'
plt.rcParams['axes.unicode_minus'] = False
# Folder to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. Gym provides many environments that *agents* can learn in. Let's import `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Next we will load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
_____no_output_____
###Markdown
Let's initialize the environment by calling the `reset()` method. This returns one observation:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image stored as a 3D NumPy array of shape [width, height, channels] (with 3 channels: Red, Green and Blue). As we will see shortly, other environments may return different objects.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,6))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little utility function to plot an environment:
###Code
def plot_environment(env, figsize=(5,6)):
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Here is what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represent the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower left for 40 steps:
###Code
env.reset()
for step in range(110):
    env.step(3) #left
for step in range(40):
    env.step(8) #lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
As we saw earlier, the observation describes what the environment looks like. Here it is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells us the reward received during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is a dictionary providing extra information about the internal state of the environment. It is useful for debugging, but the agent must not use this information for learning (it would be cheating, not learning).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), picking a random direction every 10 steps, and save each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now let's watch it as an animation:
###Code
def update_scene(num, frames, patch):
    plt.close() # looks like a matplotlib bug that displays two figures unless the previous one is closed
patch.set_data(frames[num])
return patch,
def plot_animation(frames, figsize=(5,6), repeat=False, interval=40):
fig = plt.figure(figsize=figsize)
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch),
frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
HTML(video.to_html5_video()) # convert to an HTML5 video
###Output
_____no_output_____
###Markdown
Once you are no longer using an environment, close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To train our first agent, we will use the simple Cart-Pole environment. A simple Cart-Pole environment The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole standing vertically on top of it. The agent must move the cart left or right to keep the pole from falling over.
###Code
env = gym.make("CartPole-v0")
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. To render this environment we first need to resolve a few issues. Fixing the rendering issue Some environments (including the Cart-Pole) require access to your display to open a separate window, even if you specify the `rgb_array` mode. In general you can simply ignore that window. However, if Jupyter is running on a headless server (i.e., without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb and start Jupyter with the `xvfb-run` command: $ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook. If Jupyter is running on a headless server but installing Xvfb is too much hassle, you can use the following rendering function for the Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
from pyglet.gl import gl_info
    openai_cart_pole_rendering = True # no problem, use OpenAI gym's rendering function
except Exception:
    openai_cart_pole_rendering = False # probably no X server available, use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
# use OpenAI gym's rendering function
return env.render(mode="rgb_array")
else:
# rendering for the Cart-Pole environment (in case OpenAI gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
        cart_col = 0x000000 # Blue Green Red
        pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
plt.show()
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Let's check the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, there are exactly two actions: accelerating towards the left or the right. Let's push the cart left until the pole falls:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
img.shape
###Output
_____no_output_____
###Markdown
The game ends when the pole tilts too much, even if it has not actually fallen over. Let's reset the environment and push the cart to the right this time:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like the same situation we just saw. How can we make the pole stay upright? We need to define a *policy* for that: the strategy the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A hard-coded policy Let's hard-code a simple policy: if the pole is tilting to the left, push the cart to the left, and vice versa. Let's check whether it works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # convert to an HTML5 video
###Output
_____no_output_____
###Markdown
Nope, it is unstable: after a few wobbles the pole ends up too tilted and the game is over. We need a smarter policy! Neural network policies Let's create a neural network that takes observations as inputs and outputs the action to take for each observation. To choose an action, the network first estimates a probability for each action, then selects an action randomly based on the estimated probabilities. In the case of the Cart-Pole environment there are two actions (left and right), so only one output neuron is needed: it will output the probability `p` of action 0 (left), and the probability of action 1 (right) will be `1 - p`.
###Code
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need many neurons
n_outputs = 1 # outputs the probability of accelerating left
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
###Output
_____no_output_____
###Markdown
In this environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state, you would need to consider previous actions and observations to try to infer it. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider the previous observation as well as the current one to estimate the current velocity. The same applies when the observations are noisy: you may want to use the past few observations to estimate the most likely current state. This problem is as simple as it gets, though: the current observation is noise-free and contains the environment's full state. You may wonder why we pick a random action based on the probabilities produced by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between *exploring* new actions and *exploiting* actions that are known to work well. Suppose you visit a restaurant for the first time: all the dishes look equally appealing, so you pick one at random. If it turns out to be good, you can increase the probability of ordering it next time, but you shouldn't raise that probability to 100%, or you will never try the other dishes, some of which may be even better. Let's randomly initialize this policy neural network and play one game with it:
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Let's see how well this randomly initialized policy network performs:
###Code
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # convert to an HTML5 video
###Output
_____no_output_____
###Markdown
Hmm... not so good. The neural network will have to learn to do better. First let's check whether it can learn the basic policy we used earlier: move left if the pole tilts left and move right if it tilts right. The following code defines the same neural network but adds the target probabilities `y` and the training operations (`cross_entropy`, `optimizer`, `training_op`):
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We will make the same network play in 10 different environments simultaneously and train it for 1,000 iterations. Environments are reset when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
        target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # convert to an HTML5 video
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
INFO:tensorflow:Restoring parameters from ./my_policy_net_basic.ckpt
###Markdown
It seems to have learned the policy correctly. Now let's see whether it can learn a better policy on its own. Policy Gradients To train this neural network we need to define the target probabilities `y`. If an action is good, its probability should be increased; conversely, if it is bad, it should be decreased. But how do we know whether an action is good or bad? The problem is that the effects of most actions appear with a delay: when we win or lose a game, it is not clear which actions contributed to the result. Was it the last action? The last 10? Or an action 50 steps earlier? This is called the *credit assignment problem*. The *Policy Gradients* algorithm tackles this problem by first playing several games, then making the actions in successful games slightly more likely and the actions in failed games slightly less likely. We play first, and only afterwards do we go back and think about what we did.
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
        print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # display the frames as an HTML5 video
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
INFO:tensorflow:Restoring parameters from ./my_policy_net_pg.ckpt
###Markdown
Markov Chains
###Code
transition_probabilities = [
    [0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
    [0.0, 0.0, 0.9, 0.1], # from s1 to ...
    [0.0, 1.0, 0.0, 0.0], # from s2 to ...
    [0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
    print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = np.random.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process
###Code
transition_probabilities = [
    [[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return np.random.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = np.random.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
        print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
        print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
    print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate Q-Value estimates (or close enough), the optimal policy is to choose the action with the highest Q-Value (i.e., the greedy policy).
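In symbols, the update implemented in the cell below is the standard Q-Learning rule, with learning rate $\alpha$ and discount rate $\gamma$: $$Q(s, a) \leftarrow (1 - \alpha)\, Q(s, a) + \alpha \left( r + \gamma \, \underset{a'}{\max} \, Q(s', a') \right)$$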
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
q_values[state][actions]=0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
    next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to Play MsPacman Using the DQN Algorithm Creating the MsPacman environment
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but it greatly speeds up training.
###Code
mspacman_color = 210 + 164 + 74
def preprocess_observation(obs):
    img = obs[1:176:2, ::2] # crop and downsize
    img = img.sum(axis=2) # to greyscale
    img[img==mspacman_color] = 0 # improve contrast
    img = (img // 3 - 128).astype(np.int8) # normalize from -128 to 127
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
###Output
_____no_output_____
###Markdown
Note: the `preprocess_observation()` function is slightly different from the one in the book: instead of representing pixels as 64-bit floats from -1.0 to 1.0, it represents them as signed bytes (from -128 to 127). The benefit is that the replay memory will take up roughly 8 times less RAM (about 6.5 GB instead of 52 GB). The reduced precision has no visible impact on training.
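A quick back-of-the-envelope check of those figures (this snippet is an illustration added for clarity, not part of the original notebook; it assumes each replay-memory entry stores one state and one next_state of shape 88×80×1):
```python
replay_memory_size = 500000
pixels_per_entry = 2 * 88 * 80 * 1 # one state plus one next_state per entry
print(replay_memory_size * pixels_per_entry * 1 / 2**30) # ~6.6 GiB as signed bytes
print(replay_memory_size * pixels_per_entry * 8 / 2**30) # ~52.5 GiB as 64-bit floats, i.e. 8x more
```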
###Code
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Building the DQN
###Code
reset_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden_in = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n # 9 discrete actions are available
initializer = tf.variance_scaling_initializer()
def q_network(X_state, name):
    prev_layer = X_state / 128.0 # scale pixel intensities to the [-1.0, 1.0] range.
with tf.variable_scope(name) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(
conv_n_maps, conv_kernel_sizes, conv_strides,
conv_paddings, conv_activation):
prev_layer = tf.layers.conv2d(
prev_layer, filters=n_maps, kernel_size=kernel_size,
strides=strides, padding=padding, activation=activation,
kernel_initializer=initializer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in])
hidden = tf.layers.dense(last_conv_layer_flat, n_hidden,
activation=hidden_activation,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width,
input_channels])
online_q_values, online_vars = q_network(X_state, name="q_networks/online")
target_q_values, target_vars = q_network(X_state, name="q_networks/target")
copy_ops = [target_var.assign(online_vars[var_name])
for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
online_vars
learning_rate = 0.001
momentum = 0.95
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keepdims=True)
error = tf.abs(y - q_value)
clipped_error = tf.clip_by_value(error, 0.0, 1.0)
linear_error = 2 * (error - clipped_error)
loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Note: in the first edition of the book, the loss was simply the squared error between the target Q-Values (y) and the estimated Q-Values (q_value). However, because the experiences are very noisy, it is better to use a quadratic loss only for small errors (below 1.0) and, as computed above, a linear loss (twice the absolute error) for larger errors. This way, large errors cannot push the model parameters around as much. We also tweaked a few hyperparameters: a smaller learning rate, and Nesterov accelerated gradients instead of Adam optimization, since adaptive gradient algorithms may sometimes perform poorly according to this [paper](https://arxiv.org/abs/1705.08292). A few other hyperparameters were tweaked below as well (a larger replay memory, a longer decay for the ε-greedy policy, a larger discount rate, less frequent copies of the online DQN to the target DQN, etc.).
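Written out, the loss computed above is quadratic near zero and linear beyond an error of 1.0 (with $e = |y - \hat{Q}|$): $$\mathcal{L}(e) = \begin{cases} e^2 & \text{if } e \le 1.0 \\ 2e - 1 & \text{otherwise} \end{cases}$$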
###Code
from collections import deque
replay_memory_size = 500000
replay_memory = deque([], maxlen=replay_memory_size)
def sample_memories(batch_size):
indices = np.random.permutation(len(replay_memory))[:batch_size]
    cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for idx in indices:
memory = replay_memory[idx]
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
###Output
_____no_output_____
###Markdown
Using a ReplayMemory class ================== We use a ReplayMemory class instead of a deque because random access is much faster (thanks to @NileshPS who contributed it). Also, sampling with replacement (the default) is much faster than sampling without replacement from a large replay memory.
###Code
class ReplayMemory:
def __init__(self, maxlen):
self.maxlen = maxlen
self.buf = np.empty(shape=maxlen, dtype=np.object)
self.index = 0
self.length = 0
def append(self, data):
self.buf[self.index] = data
self.length = min(self.length + 1, self.maxlen)
self.index = (self.index + 1) % self.maxlen
def sample(self, batch_size, with_replacement=True):
if with_replacement:
            indices = np.random.randint(self.length, size=batch_size) # faster
else:
indices = np.random.permutation(self.length)[:batch_size]
return self.buf[indices]
replay_memory_size = 500000
replay_memory = ReplayMemory(replay_memory_size)
def sample_memories(batch_size):
    cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for memory in replay_memory.sample(batch_size):
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
###Output
_____no_output_____
###Markdown
=============================================
###Code
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 2000000
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if np.random.rand() < epsilon:
        return np.random.randint(n_outputs) # random action
else:
        return np.argmax(q_values) # optimal action
n_steps = 4000000 # total number of training steps
training_start = 10000 # start training after 10,000 game iterations
training_interval = 4 # run a training step every 4 game iterations
save_steps = 1000 # save the model every 1,000 training steps
copy_steps = 10000 # copy the online DQN to the target DQN every 10,000 training steps
discount_rate = 0.99
skip_start = 90 # skip the start of every game (it's just wasted time)
batch_size = 50
iteration = 0 # game iteration counter
checkpoint_path = "./my_dqn.ckpt"
done = True # the environment needs to be reset
###Output
_____no_output_____
###Markdown
We need a few variables to track the learning progress:
###Code
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
###Output
_____no_output_____
###Markdown
And now the main training loop!
###Code
with tf.Session() as sess:
if os.path.isfile(checkpoint_path + ".index"):
saver.restore(sess, checkpoint_path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
        print("\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}\tMean Max-Q {:5f}   ".format(
            iteration, step, n_steps, step * 100 / n_steps, loss_val, mean_max_q), end="")
        if done: # the game is over, start again
obs = env.reset()
            for skip in range(skip_start): # skip the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
        # Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
        # Online DQN plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
        # Let's memorize what happened in the replay memory
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
        # Compute statistics for tracking progress (not shown in the book)
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
if iteration < training_start or iteration % training_interval != 0:
            continue # only train after the warmup period and at regular intervals
        # Sample memories and use the target DQN to produce the target Q-Values
X_state_val, X_action_val, rewards, X_next_state_val, continues = (
sample_memories(batch_size))
next_q_values = target_q_values.eval(
feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
        # Train the online DQN
_, loss_val = sess.run([training_op, loss], feed_dict={
X_state: X_state_val, X_action: X_action_val, y: y_val})
        # Regularly copy the online DQN to the target DQN
if step % copy_steps == 0:
copy_online_to_target.run()
        # And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
INFO:tensorflow:Restoring parameters from ./my_dqn.ckpt
Iteration 13992 Training step 3999999/4000000 (100.0)% Loss 1.759242 Mean Max-Q 221.083446
###Markdown
You can interrupt the cell above at any time to test your agent using the cell below. You can then run the cell above once again: it will load the last parameters saved and resume training.
###Code
frames = []
n_max_steps = 10000
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
obs = env.reset()
for step in range(n_max_steps):
state = preprocess_observation(obs)
        # Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = np.argmax(q_values)
        # Online DQN plays
obs, reward, done, info = env.step(action)
img = env.render(mode="rgb_array")
frames.append(img)
if done:
break
video = plot_animation(frames, figsize=(5,6))
HTML(video.to_html5_video()) # display the frames as an HTML5 video
###Output
_____no_output_____
###Markdown
Extra material Preprocessing for Breakout Here is a preprocessing function you can use to train a DQN for the Breakout-v0 Atari game:
###Code
def preprocess_observation(obs):
    img = obs[34:194:2, ::2] # crop and downsize
return np.mean(img, axis=2).reshape(80, 80) / 255.0
env = gym.make("Breakout-v0")
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
img = preprocess_observation(obs)
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (80×80 greyscale)")
plt.imshow(img, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, a single image does not tell you the direction and speed of the ball, which are crucial to play this game. For this reason, it is best to combine several consecutive observations to create the environment's state representation. One way to do that is to create a multi-channel image, with one channel per recent observation. Another is to merge all recent observations into a single-channel image, using `np.max()`. In this case, we dim the older images so that the DQN can distinguish the present from the past.
###Code
from collections import deque
def combine_observations_multichannel(preprocessed_observations):
return np.array(preprocessed_observations).transpose([1, 2, 0])
def combine_observations_singlechannel(preprocessed_observations, dim_factor=0.5):
dimmed_observations = [obs * dim_factor**index
for index, obs in enumerate(reversed(preprocessed_observations))]
return np.max(np.array(dimmed_observations), axis=0)
n_observations_per_state = 3
preprocessed_observations = deque([], maxlen=n_observations_per_state)
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
preprocessed_observations.append(preprocess_observation(obs))
img1 = combine_observations_multichannel(preprocessed_observations)
img2 = combine_observations_singlechannel(preprocessed_observations)
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.title("Multichannel state")
plt.imshow(img1, interpolation="nearest")
plt.axis("off")
plt.subplot(122)
plt.title("Singlechannel state")
plt.imshow(img2, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Exercise solutions 1. to 7. See Appendix A. 8. BipedalWalker-v2 *Exercise: Use policy gradients to train an agent on OpenAI Gym's `BipedalWalker-v2` environment.*
###Code
import gym
env = gym.make("BipedalWalker-v2")
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
###Markdown
Note: if you get an issue like "`module 'Box2D._Box2D' has no attribute 'RAND_LIMIT'`" when creating the `BipedalWalker-v2` environment, try the following:```$ pip uninstall Box2D-kengz$ pip install git+https://github.com/pybox2d/pybox2d```
###Code
obs = env.reset()
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
obs
###Output
_____no_output_____
###Markdown
See the [online documentation](https://github.com/openai/gym/wiki/BipedalWalker-v2) for the meaning of each of these 24 numbers.
###Code
env.action_space
env.action_space.low
env.action_space.high
###Output
_____no_output_____
###Markdown
This is a continuous 4D action space (from -1 to 1) controlling the hip and knee torques of each leg. One way to deal with a continuous action space is to discretize it. For example, we can limit the possible torque values to these 3 values: -1.0, 0.0, and 1.0. This means there are $3^4 = 81$ possible actions.
###Code
from itertools import product
possible_torques = np.array([-1.0, 0.0, 1.0])
possible_actions = np.array(list(product(possible_torques, possible_torques, possible_torques, possible_torques)))
possible_actions.shape
tf.reset_default_graph()
# 1. Specify the network architecture
n_inputs = env.observation_space.shape[0] # == 24
n_hidden = 10
n_outputs = len(possible_actions) # == 81
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.selu,
kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
outputs = tf.nn.softmax(logits)
# 3. Select a random action based on the estimated probabilities
action_index = tf.squeeze(tf.multinomial(logits, num_samples=1), axis=-1)
# 4. Training
learning_rate = 0.01
y = tf.one_hot(action_index, depth=len(possible_actions))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
This policy network is not trained yet, but let's run it anyway.
###Code
def run_bipedal_walker(model_path=None, n_max_steps = 1000):
env = gym.make("BipedalWalker-v2")
frames = []
with tf.Session() as sess:
if model_path is None:
init.run()
else:
saver.restore(sess, model_path)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_index_val = action_index.eval(feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
if done:
break
env.close()
return frames
frames = run_bipedal_walker()
video = plot_animation(frames)
HTML(video.to_html5_video())
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
###Markdown
Nope, it can't walk. So let's train it!
###Code
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 1000
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}/{}".format(iteration + 1, n_iterations), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_index_val, gradients_val = sess.run([action_index, gradients],
feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_bipedal_walker_pg.ckpt")
frames = run_bipedal_walker("./my_bipedal_walker_pg.ckpt")
video = plot_animation(frames)
HTML(video.to_html5_video())
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
INFO:tensorflow:Restoring parameters from ./my_bipedal_walker_pg.ckpt
###Markdown
**Chapter 16 – Reinforcement Learning** This notebook contains all the sample code and solutions to the exercises in chapter 16. Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
###Code
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
import sys
# to make this notebook's output stable across runs
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# To plot pretty figures and animations
%matplotlib nbagg
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
Note: there may be minor differences between the output of this notebook and the examples shown in the book. You can safely ignore these differences. They are mainly due to the fact that most of the environments provided by OpenAI gym have some randomness. Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:
###Code
import gym
import matplotlib.pyplot as plt
import matplotlib
###Output
_____no_output_____
###Markdown
Next we will load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
_____no_output_____
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [width, height, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,4))
plt.imshow(img)
plt.axis("off")
plt.savefig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little helper function to plot an environment:
###Code
def plot_environment(env, figsize=(5,4)):
plt.close() # or else nbagg sometimes plots in the previous cell
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represent the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower left for 40 steps:
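For reference, that mapping written out as a plain dict (an illustrative snippet added here, not part of the gym API):
```python
JOYSTICK_POSITIONS = {0: "center", 1: "up", 2: "right", 3: "left", 4: "down",
                      5: "upper-right", 6: "upper-left", 7: "lower-right", 8: "lower-left"}
```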
###Code
env.reset()
for step in range(110):
env.step(3) #left
for step in range(40):
env.step(8) #lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), by moving in random directions for 10 steps at a time, recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now show the animation (it's a bit jittery within Jupyter):
###Code
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
plt.close() # or else nbagg sometimes plots in the previous cell
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch), frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To code our first learning agent, we will be using a simpler environment: the Cart-Pole. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment... unfortunately we need to fix an annoying rendering issue first. Fixing the rendering issue Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify the `rgb_array` mode. In general you can safely ignore that window. However, if Jupyter is running on a headless server (i.e., without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb. You can start Jupyter using the `xvfb-run` command: $ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook If Jupyter is running on a headless server but you don't want to worry about Xvfb, then you can just use the following rendering function for the Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
from pyglet.gl import gl_info
openai_cart_pole_rendering = True # no problem, let's use OpenAI gym's rendering function
except Exception:
openai_cart_pole_rendering = False # probably no X server available, let's use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
# use OpenAI gym's rendering function
return env.render(mode="rgb_array")
else:
# rendering for the cart pole environment (in case OpenAI gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
cart_col = 0x000000 # Blue Green Red
pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
plt.show()
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
img.shape
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to the right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like it's doing what we're telling it to do. Now how can we make the pole remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! Neural Network Policies Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`. Note: instead of using the `fully_connected()` function from the `tensorflow.contrib.layers` module (as in the book), we now use the `dense()` function from the `tf.layers` module, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same.The main differences relevant to this chapter are:* the `_fn` suffix was removed in all the parameters that had it (for example the `activation_fn` parameter was renamed to `activation`).* the `weights` parameter was renamed to `kernel`,* the default activation is `None` instead of `tf.nn.relu`
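As a minimal before/after sketch of these renames (assuming TensorFlow 1.x with `tf.contrib` still available; for illustration only):
```python
import tensorflow as tf

X = tf.placeholder(tf.float32, shape=[None, 4])
init = tf.variance_scaling_initializer()
# First edition (contrib) style:
h1 = tf.contrib.layers.fully_connected(X, 4, activation_fn=tf.nn.elu,
                                       weights_initializer=init)
# tf.layers style used in this notebook (activation_fn -> activation,
# weights_initializer -> kernel_initializer, default activation is now None):
h2 = tf.layers.dense(X, 4, activation=tf.nn.elu, kernel_initializer=init)
```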
###Code
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more than this
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
###Output
_____no_output_____
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
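As a plain-NumPy illustration of this sampling idea (not the notebook's code, which uses `tf.multinomial()` in the next cell):
```python
import numpy as np

p_left = 0.8 # hypothetical probability output by the policy network
action = np.random.choice([0, 1], p=[p_left, 1 - p_left])
# action 0 (left) is drawn about 80% of the time and action 1 (right) about 20%,
# so the agent mostly exploits the favored action but still explores the other one.
```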
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performed:
###Code
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`):
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_.The _Policy Gradients_ algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Markov Chains
###Code
transition_probabilities = [
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to ...
[0.0, 1.0, 0.0, 0.0], # from s2 to ...
[0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = np.random.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
_____no_output_____
###Markdown
Markov Decision Process
###Code
transition_probabilities = [
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return np.random.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = np.random.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
_____no_output_____
###Markdown
Q-Learning Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate Q-Value estimates (or close enough), then the optimal policy consists in choosing the action that has the highest Q-Value (i.e., the greedy policy).
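As a one-step sanity check of this update rule (illustrative only, using the `alpha=0.01` and `gamma=0.99` values from the next cell and a Q-Value table initialized to zero):
```python
alpha, gamma = 0.01, 0.99
q_old, reward, max_next_q = 0.0, 10.0, 0.0 # e.g., the first time we collect +10
q_new = (1 - alpha) * q_old + alpha * (reward + gamma * max_next_q)
print(q_new) # 0.1 -- each experience nudges the estimate toward the observed return
```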
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
q_values[state][actions]=0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
_____no_output_____
###Markdown
Learning to Play MsPacman Using the DQN Algorithm **Warning**: Unfortunately, the first version of the book contained two important errors in this section.1. The actor DQN and critic DQN should have been named _online DQN_ and _target DQN_ respectively. Actor-critic algorithms are a distinct class of algorithms.2. The online DQN is the one that learns and is copied to the target DQN at regular intervals. The target DQN's only role is to estimate the next state's Q-Values for each possible action. This is needed to compute the target Q-Values for training the online DQN, as shown in this equation:$y(s,a) = r + \gamma \cdot \underset{a'}{\max} \, Q_\text{target}(s', a')$* $y(s,a)$ is the target Q-Value to train the online DQN for the state-action pair $(s, a)$.* $r$ is the reward actually collected after playing action $a$ in state $s$.* $\gamma$ is the discount rate.* $s'$ is the state actually reached after playing action $a$ in state $s$.* $a'$ is one of the possible actions in state $s'$.* $Q_\text{target}(s', a')$ is the target DQN's estimate of the Q-Value of playing action $a'$ while in state $s'$.I hope these errors did not affect you, and if they did, I sincerely apologize. Creating the MsPacman environment
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
mspacman_color = 210 + 164 + 74
def preprocess_observation(obs):
img = obs[1:176:2, ::2] # crop and downsize
img = img.sum(axis=2) # to greyscale
img[img==mspacman_color] = 0 # Improve contrast
img = (img // 3 - 128).astype(np.int8) # normalize from -128 to 127
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
###Output
_____no_output_____
###Markdown
Note: the `preprocess_observation()` function is slightly different from the one in the book: instead of representing pixels as 64-bit floats from -1.0 to 1.0, it represents them as signed bytes (from -128 to 127). The benefit is that the replay memory will take up roughly 8 times less RAM (about 6.5 GB instead of 52 GB). The reduced precision has no visible impact on training.
###Code
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Build DQN Note: instead of using `tf.contrib.layers.convolution2d()` or `tf.contrib.layers.conv2d()` (as in the first version of the book), we now use the `tf.layers.conv2d()`, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same, except that the parameter names have changed slightly:* the `num_outputs` parameter was renamed to `filters`,* the `stride` parameter was renamed to `strides`,* the `_fn` suffix was removed from parameter names that had it (e.g., `activation_fn` was renamed to `activation`),* the `weights_initializer` parameter was renamed to `kernel_initializer`,* the weights variable was renamed to `"kernel"` (instead of `"weights"`), and the biases variable was renamed from `"biases"` to `"bias"`,* and the default `activation` is now `None` instead of `tf.nn.relu`.
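A minimal before/after sketch of these conv-layer renames as well (assuming TensorFlow 1.x with `tf.contrib` still available; for illustration only):
```python
import tensorflow as tf

X = tf.placeholder(tf.float32, shape=[None, 88, 80, 1])
init = tf.variance_scaling_initializer()
# First edition (contrib) style:
c1 = tf.contrib.layers.conv2d(X, num_outputs=32, kernel_size=(8, 8), stride=4,
                              padding="SAME", activation_fn=tf.nn.relu,
                              weights_initializer=init)
# tf.layers style used below, with the renamed parameters:
c2 = tf.layers.conv2d(X, filters=32, kernel_size=(8, 8), strides=4,
                      padding="SAME", activation=tf.nn.relu,
                      kernel_initializer=init)
```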
###Code
reset_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden_in = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n # 9 discrete actions are available
initializer = tf.variance_scaling_initializer()
def q_network(X_state, name):
prev_layer = X_state / 128.0 # scale pixel intensities to the [-1.0, 1.0] range.
with tf.variable_scope(name) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(
conv_n_maps, conv_kernel_sizes, conv_strides,
conv_paddings, conv_activation):
prev_layer = tf.layers.conv2d(
prev_layer, filters=n_maps, kernel_size=kernel_size,
strides=strides, padding=padding, activation=activation,
kernel_initializer=initializer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in])
hidden = tf.layers.dense(last_conv_layer_flat, n_hidden,
activation=hidden_activation,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width,
input_channels])
online_q_values, online_vars = q_network(X_state, name="q_networks/online")
target_q_values, target_vars = q_network(X_state, name="q_networks/target")
copy_ops = [target_var.assign(online_vars[var_name])
for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
online_vars
learning_rate = 0.001
momentum = 0.95
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keepdims=True)
error = tf.abs(y - q_value)
clipped_error = tf.clip_by_value(error, 0.0, 1.0)
linear_error = 2 * (error - clipped_error)
loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Note: in the first version of the book, the loss function was simply the squared error between the target Q-Values (`y`) and the estimated Q-Values (`q_value`). However, because the experiences are very noisy, it is better to use a quadratic loss only for small errors (below 1.0) and a linear loss (twice the absolute error) for larger errors, which is what the code above computes. This way large errors don't push the model parameters around as much. Note that we also tweaked some hyperparameters (using a smaller learning rate, and using Nesterov Accelerated Gradients rather than Adam optimization, since adaptive gradient algorithms may sometimes generalize poorly, according to this [paper](https://arxiv.org/abs/1705.08292)). We also tweaked a few other hyperparameters below (a larger replay memory, longer decay for the $\epsilon$-greedy policy, larger discount rate, less frequent copies of the online DQN to the target DQN, etc.). We use this `ReplayMemory` class instead of a `deque` because it is much faster for random access (thanks to @NileshPS who contributed it). Moreover, we default to sampling with replacement, which is much faster than sampling without replacement for large replay memories.
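As a quick sanity check, here is a minimal NumPy sketch (an illustration, not code from the book) of the quadratic-then-linear loss described above:
###Code
import numpy as np
def quadratic_then_linear_loss(error, threshold=1.0):
    # quadratic below the threshold, linear (slope 2) beyond it, mirroring
    # loss = square(clipped_error) + 2 * (error - clipped_error) above
    abs_error = np.abs(error)
    clipped = np.clip(abs_error, 0.0, threshold)
    return clipped**2 + 2 * (abs_error - clipped)
print(quadratic_then_linear_loss(np.array([0.5, 1.0, 3.0]))) # [0.25 1. 5.]
###Output
_____no_output_____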
###Code
class ReplayMemory:
def __init__(self, maxlen):
self.maxlen = maxlen
self.buf = np.empty(shape=maxlen, dtype=np.object)
self.index = 0
self.length = 0
def append(self, data):
self.buf[self.index] = data
self.length = min(self.length + 1, self.maxlen)
self.index = (self.index + 1) % self.maxlen
def sample(self, batch_size, with_replacement=True):
if with_replacement:
indices = np.random.randint(self.length, size=batch_size) # faster
else:
indices = np.random.permutation(self.length)[:batch_size]
return self.buf[indices]
replay_memory_size = 500000
replay_memory = ReplayMemory(replay_memory_size)
def sample_memories(batch_size):
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for memory in replay_memory.sample(batch_size):
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 2000000
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if np.random.rand() < epsilon:
return np.random.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
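# Worked values for this schedule (added for clarity, not in the book):
# epsilon starts at 1.0, reaches 0.55 after 1,000,000 steps, and stays at
# eps_min = 0.1 from step 2,000,000 onward.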
n_steps = 4000000 # total number of training steps
training_start = 10000 # start training after 10,000 game iterations
training_interval = 4 # run a training step every 4 game iterations
save_steps = 1000 # save the model every 1,000 training steps
copy_steps = 10000 # copy online DQN to target DQN every 10,000 training steps
discount_rate = 0.99
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "./my_dqn.ckpt"
done = True # env needs to be reset
###Output
_____no_output_____
###Markdown
A few variables for tracking progress:
###Code
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
###Output
_____no_output_____
###Markdown
And now the main training loop!
###Code
with tf.Session() as sess:
if os.path.isfile(checkpoint_path + ".index"):
saver.restore(sess, checkpoint_path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print("\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}\tMean Max-Q {:5f} ".format(
iteration, step, n_steps, step * 100 / n_steps, loss_val, mean_max_q), end="")
if done: # game over, start again
obs = env.reset()
for skip in range(skip_start): # skip the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# Online DQN plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
# Compute statistics for tracking progress (not shown in the book)
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
if iteration < training_start or iteration % training_interval != 0:
continue # only train after warmup period and at regular intervals
# Sample memories and use the target DQN to produce the target Q-Value
X_state_val, X_action_val, rewards, X_next_state_val, continues = (
sample_memories(batch_size))
next_q_values = target_q_values.eval(
feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
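        # This is the Bellman target y = r + gamma * max_a' Q_target(s', a');
        # 'continues' is 0.0 at episode ends, which drops the bootstrap term
        # (explanatory comment added for clarity, not in the book).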
# Train the online DQN
_, loss_val = sess.run([training_op, loss], feed_dict={
X_state: X_state_val, X_action: X_action_val, y: y_val})
# Regularly copy the online DQN to the target DQN
if step % copy_steps == 0:
copy_online_to_target.run()
# And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
_____no_output_____
###Markdown
You can interrupt the cell above at any time to test your agent using the cell below. You can then run the cell above once again: it will load the last parameters saved and resume training.
###Code
frames = []
n_max_steps = 10000
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
obs = env.reset()
for step in range(n_max_steps):
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = np.argmax(q_values)
# Online DQN plays
obs, reward, done, info = env.step(action)
img = env.render(mode="rgb_array")
frames.append(img)
if done:
break
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Extra material Preprocessing for Breakout Here is a preprocessing function you can use to train a DQN for the Breakout-v0 Atari game:
###Code
def preprocess_observation(obs):
img = obs[34:194:2, ::2] # crop and downsize
return np.mean(img, axis=2).reshape(80, 80) / 255.0
env = gym.make("Breakout-v0")
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (80×80 grayscale)")
plt.imshow(img, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, a single image does not give you the direction and speed of the ball, which are crucial information for playing this game. For this reason, it is best to combine several consecutive observations to create the environment's state representation. One way to do that is to create a multi-channel image, with one channel per recent observation. Another is to merge all recent observations into a single-channel image, using `np.max()`. In this case, we need to dim the older images so that the DQN can distinguish the past from the present.
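For instance, with `dim_factor=0.5` and 3 stacked observations (as in the code below), the newest frame keeps its full intensity while the two previous frames are scaled by $0.5$ and $0.25$ respectively before the pixel-wise maximum is taken.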
###Code
from collections import deque
def combine_observations_multichannel(preprocessed_observations):
return np.array(preprocessed_observations).transpose([1, 2, 0])
def combine_observations_singlechannel(preprocessed_observations, dim_factor=0.5):
dimmed_observations = [obs * dim_factor**index
for index, obs in enumerate(reversed(preprocessed_observations))]
return np.max(np.array(dimmed_observations), axis=0)
n_observations_per_state = 3
preprocessed_observations = deque([], maxlen=n_observations_per_state)
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
preprocessed_observations.append(preprocess_observation(obs))
img1 = combine_observations_multichannel(preprocessed_observations)
img2 = combine_observations_singlechannel(preprocessed_observations)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Multichannel state")
plt.imshow(img1, interpolation="nearest")
plt.axis("off")
plt.subplot(122)
plt.title("Singlechannel state")
plt.imshow(img2, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Exercise solutions 1. to 7. See Appendix A. 8. BipedalWalker-v2 Exercise: _Use policy gradients to tackle OpenAI gym's "BipedalWalker-v2"._
###Code
import gym
env = gym.make("BipedalWalker-v2")
###Output
_____no_output_____
###Markdown
Note: if you run into [this issue](https://github.com/openai/gym/issues/100) ("`module 'Box2D._Box2D' has no attribute 'RAND_LIMIT'`") when making the `BipedalWalker-v2` environment, then try this workaround:```$ pip uninstall Box2D-kengz$ pip install git+https://github.com/pybox2d/pybox2d```
###Code
obs = env.reset()
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
obs
###Output
_____no_output_____
###Markdown
You can find the meaning of each of these 24 numbers in the [documentation](https://github.com/openai/gym/wiki/BipedalWalker-v2).
###Code
env.action_space
env.action_space.low
env.action_space.high
###Output
_____no_output_____
###Markdown
This is a 4D continuous action space controling each leg's hip torque and knee torque (from -1 to 1). To deal with a continuous action space, one method is to discretize it. For example, let's limit the possible torque values to these 3 values: -1.0, 0.0, and 1.0. This means that we are left with $3^4=81$ possible actions.
###Code
from itertools import product
possible_torques = np.array([-1.0, 0.0, 1.0])
possible_actions = np.array(list(product(possible_torques, possible_torques, possible_torques, possible_torques)))
possible_actions.shape
tf.reset_default_graph()
# 1. Specify the network architecture
n_inputs = env.observation_space.shape[0] # == 24
n_hidden = 10
n_outputs = len(possible_actions) # == 81
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.selu,
kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
outputs = tf.nn.softmax(logits)
# 3. Select a random action based on the estimated probabilities
action_index = tf.squeeze(tf.multinomial(logits, num_samples=1), axis=-1)
# 4. Training
learning_rate = 0.01
y = tf.one_hot(action_index, depth=len(possible_actions))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Let's try running this policy network, although it is not trained yet.
###Code
def run_bipedal_walker(model_path=None, n_max_steps = 1000):
env = gym.make("BipedalWalker-v2")
frames = []
with tf.Session() as sess:
if model_path is None:
init.run()
else:
saver.restore(sess, model_path)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_index_val = action_index.eval(feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
if done:
break
env.close()
return frames
frames = run_bipedal_walker()
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Nope, it really can't walk. So let's train it!
###Code
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 1000
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}/{}".format(iteration + 1, n_iterations), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_index_val, gradients_val = sess.run([action_index, gradients],
feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_bipedal_walker_pg.ckpt")
frames = run_bipedal_walker("./my_bipedal_walker_pg.ckpt")
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
**Chapter 16 – Reinforcement Learning** _This notebook contains all the sample code and solutions to the exercises in chapter 16._ Setup This notebook supports both Python 2 and Python 3. It imports the common modules, configures matplotlib so that figures are drawn inline in the notebook, and prepares a function to save the generated figures:
###Code
# Python 2 and Python 3 support
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
import sys
# Seed the pseudo-random number generators for consistent output
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# Matplotlib setup
from IPython.display import HTML
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# To display Korean labels in the figures
plt.rcParams['font.family'] = 'NanumBarunGothic'
plt.rcParams['axes.unicode_minus'] = False
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Next we will load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
_____no_output_____
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image stored as a 3D NumPy array of shape [height, width, channels] (with 3 channels: Red, Green, Blue). As we will see, other environments may return different objects.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,6))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little helper function to plot an environment:
###Code
def plot_environment(env, figsize=(5,6)):
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Now let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represents the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower-left for 40 steps:
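As a tiny readability sketch (the `action_names` list below simply restates the mapping in the sentence above; it is not part of the original code):
###Code
action_names = ["center", "up", "right", "left", "down",
                "upper-right", "upper-left", "lower-right", "lower-left"]
print(action_names[3], action_names[8]) # the two actions played below
###Output
_____no_output_____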
###Code
env.reset()
for step in range(110):
    env.step(3) # left
for step in range(40):
    env.step(8) # lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
As we saw earlier, the observation describes what the environment looks like; here it is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), picking a random direction every 10 steps and recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now let's watch it as an animation:
###Code
def update_scene(num, frames, patch):
    plt.close() # needed to work around a matplotlib bug that would otherwise display two figures
patch.set_data(frames[num])
return patch,
def plot_animation(frames, figsize=(5,6), repeat=False, interval=40):
fig = plt.figure(figsize=figsize)
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch),
frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
HTML(video.to_html5_video()) # convert the animation to an HTML5 video
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To code our first learning agent, we will be using a simple Cart-Pole environment. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render this environment... but first we need to work around a couple of rendering issues. Fixing the rendering issue Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify `mode="rgb_array"`. In general you can safely ignore that window. However, if Jupyter is running on a headless server (i.e., without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb, and start Jupyter using the `xvfb-run` command: $ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook If Jupyter is running on a headless server but you don't want the hassle of installing Xvfb, you can use the following rendering function for the Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
    from pyglet.gl import gl_info
    openai_cart_pole_rendering = True   # no problem, we can use OpenAI gym's rendering function
except Exception:
    openai_cart_pole_rendering = False  # probably no X server available, let's use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
        # use OpenAI gym's rendering function
return env.render(mode="rgb_array")
else:
        # rendering for the Cart-Pole environment (in case OpenAI gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
        cart_col = 0x000000 # Blue Green Red
        pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
plt.show()
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
img.shape
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to the right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like it's doing what we said. So how can we make the pole remain upright? We will need to define a *policy* for that: this is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A hard-coded policy Let's hard-code a simple policy: if the pole is tilting to the left, push the cart to the left, and vice versa. Let's see if this works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # convert the animation to an HTML5 video
###Output
_____no_output_____
###Markdown
Nope, the system is unstable: after a few wobbles, the pole ends up tilting too much and the game is over. We need a smarter policy! Neural network policies Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of action 0 (left), and the probability of action 1 (right) will be `1 - p`.
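As a minimal sketch of this sampling scheme (an illustration with a made-up probability, not code from the book; the cell below does the same thing with `tf.multinomial()`):
###Code
import numpy as np
p_left = 0.7 # assumed output of the policy network for action 0 (left)
action = int(np.random.rand() >= p_left) # 0 with probability 0.7, 1 with probability 0.3
print(action)
###Output
_____no_output_____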
###Code
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more hidden neurons
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
###Output
_____no_output_____
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state, then you might need to consider past actions and observations as well. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is when the observations are noisy: in that case, you generally want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we pick a random action based on the probabilities given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between *exploring* new actions and *exploiting* the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing, so you randomly pick one. If it turns out to be good, you can increase the probability of ordering it next time, but you shouldn't increase that probability all the way up to 100%, or else you would never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performs:
###Code
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # convert the animation to an HTML5 video
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network, but we add the target probabilities `y` and the training operations (`cross_entropy`, `optimizer` and `training_op`):
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We can make the same net play in 10 different environments in parallel, and train for 1,000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
        target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # convert the animation to an HTML5 video
###Output
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
INFO:tensorflow:Restoring parameters from ./my_policy_net_basic.ckpt
###Markdown
It looks like it learned the policy correctly. Now let's see if it is capable of learning a better policy on its own. Policy gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the *credit assignment problem*. The *policy gradients* algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
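# Worked check (added for clarity, not in the book): with discount_rate=0.8 the
# first call above returns array([-22., -40., -50.]): step 2 keeps its raw
# reward -50, step 1 gets 0 + 0.8*(-50) = -40, and step 0 gets 10 + 0.8*(-40) = -22.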
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
        print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # convert the animation to an HTML5 video
###Output
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
INFO:tensorflow:Restoring parameters from ./my_policy_net_pg.ckpt
###Markdown
Markov Chains
###Code
transition_probabilities = [
    [0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
    [0.0, 0.0, 0.9, 0.1], # from s1 to ...
    [0.0, 1.0, 0.0, 0.0], # from s2 to ...
    [0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
    print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = np.random.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process
###Code
transition_probabilities = [
    [[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return np.random.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = np.random.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
        print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
        print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
    print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate (or close enough) Q-Value estimates, the optimal policy is simply to choose the action that has the highest Q-Value (i.e., the greedy policy).
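In symbols (the standard formulation, matching the update in the code below), each observed transition $(s, a, r, s')$ updates the estimate as $Q(s,a) \leftarrow (1-\alpha)\,Q(s,a) + \alpha\bigl(r + \gamma \max_{a'} Q(s',a')\bigr)$, where $\alpha$ is the learning rate and $\gamma$ is the discount rate.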
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
q_values[state][actions]=0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
    next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to play MsPacman using the DQN algorithm Creating the MsPacman environment
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but it greatly speeds up training.
###Code
mspacman_color = 210 + 164 + 74
def preprocess_observation(obs):
    img = obs[1:176:2, ::2] # crop and downsize
    img = img.sum(axis=2) # to greyscale
    img[img==mspacman_color] = 0 # improve contrast
    img = (img // 3 - 128).astype(np.int8) # normalize from -128 to 127
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
###Output
_____no_output_____
###Markdown
Note: the `preprocess_observation()` function is slightly different from the one in the book: instead of representing pixels as 64-bit floats from -1.0 to 1.0, it represents them as signed bytes (from -128 to 127). The benefit is that the replay memory will take up roughly 8 times less RAM (about 6.5 GB instead of 52 GB). The reduced precision has no visible impact on training.
###Code
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Build the DQN
###Code
reset_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden_in = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n # 9 discrete actions are available
initializer = tf.variance_scaling_initializer()
def q_network(X_state, name):
    prev_layer = X_state / 128.0 # scale pixel intensities to the [-1.0, 1.0] range.
with tf.variable_scope(name) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(
conv_n_maps, conv_kernel_sizes, conv_strides,
conv_paddings, conv_activation):
prev_layer = tf.layers.conv2d(
prev_layer, filters=n_maps, kernel_size=kernel_size,
strides=strides, padding=padding, activation=activation,
kernel_initializer=initializer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in])
hidden = tf.layers.dense(last_conv_layer_flat, n_hidden,
activation=hidden_activation,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width,
input_channels])
online_q_values, online_vars = q_network(X_state, name="q_networks/online")
target_q_values, target_vars = q_network(X_state, name="q_networks/target")
copy_ops = [target_var.assign(online_vars[var_name])
for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
online_vars
learning_rate = 0.001
momentum = 0.95
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keepdims=True)
error = tf.abs(y - q_value)
clipped_error = tf.clip_by_value(error, 0.0, 1.0)
linear_error = 2 * (error - clipped_error)
loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Note: in the first version of the book, the loss function was simply the squared error between the target Q-Values (`y`) and the estimated Q-Values (`q_value`). However, because the experiences are very noisy, it is better to use a quadratic loss only for small errors (below 1.0) and a linear loss (twice the absolute error) for larger errors, which is what the code above computes. This way large errors don't push the model parameters around as much. We also tweaked some hyperparameters (using a smaller learning rate, and using Nesterov Accelerated Gradients rather than Adam optimization, since adaptive gradient algorithms may sometimes generalize poorly, according to the paper mentioned earlier). A few other hyperparameters were tweaked below as well (a larger replay memory, longer decay for the $\epsilon$-greedy policy, a larger discount rate, less frequent copies of the online DQN to the target DQN, etc.).
###Code
from collections import deque
replay_memory_size = 500000
replay_memory = deque([], maxlen=replay_memory_size)
def sample_memories(batch_size):
indices = np.random.permutation(len(replay_memory))[:batch_size]
    cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for idx in indices:
memory = replay_memory[idx]
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
###Output
_____no_output_____
###Markdown
Using a `ReplayMemory` class ================== We use a `ReplayMemory` class instead of a `deque` because it is much faster for random access (thanks to @NileshPS who contributed it). Moreover, we default to sampling with replacement, which is much faster than sampling without replacement for large replay memories.
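A rough micro-benchmark sketch (an illustration, not from the book; absolute timings will vary by machine) of why random access favors an array over a `deque`:
###Code
from collections import deque
import timeit
import numpy as np
d = deque(range(100000))
a = np.arange(100000)
# indexing into the middle of a deque traverses it (O(n)), while indexing a
# NumPy array is constant time, hence the ReplayMemory class below
print(timeit.timeit(lambda: d[50000], number=1000))
print(timeit.timeit(lambda: a[50000], number=1000))
###Output
_____no_output_____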
###Code
class ReplayMemory:
def __init__(self, maxlen):
self.maxlen = maxlen
self.buf = np.empty(shape=maxlen, dtype=np.object)
self.index = 0
self.length = 0
def append(self, data):
self.buf[self.index] = data
self.length = min(self.length + 1, self.maxlen)
self.index = (self.index + 1) % self.maxlen
def sample(self, batch_size, with_replacement=True):
if with_replacement:
            indices = np.random.randint(self.length, size=batch_size) # faster
else:
indices = np.random.permutation(self.length)[:batch_size]
return self.buf[indices]
replay_memory_size = 500000
replay_memory = ReplayMemory(replay_memory_size)
def sample_memories(batch_size):
    cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for memory in replay_memory.sample(batch_size):
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
###Output
_____no_output_____
###Markdown
=============================================
###Code
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 2000000
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if np.random.rand() < epsilon:
        return np.random.randint(n_outputs) # random action
    else:
        return np.argmax(q_values) # optimal action
n_steps = 4000000 # total number of training steps
training_start = 10000 # start training after 10,000 game iterations
training_interval = 4 # run a training step every 4 game iterations
save_steps = 1000 # save the model every 1,000 training steps
copy_steps = 10000 # copy online DQN to target DQN every 10,000 training steps
discount_rate = 0.99
skip_start = 90 # skip the start of every game (it's just waiting time)
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "./my_dqn.ckpt"
done = True # env needs to be reset
###Output
_____no_output_____
###Markdown
A few variables for tracking progress:
###Code
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
###Output
_____no_output_____
###Markdown
And now the main training loop!
###Code
with tf.Session() as sess:
if os.path.isfile(checkpoint_path + ".index"):
saver.restore(sess, checkpoint_path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
        print("\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}\tMean Max-Q {:5f} ".format(
            iteration, step, n_steps, step * 100 / n_steps, loss_val, mean_max_q), end="")
        if done: # game over, start again
            obs = env.reset()
            for skip in range(skip_start): # skip the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
        # Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
        # Online DQN plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
        # Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
        # Compute statistics for tracking progress (not shown in the book)
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
if iteration < training_start or iteration % training_interval != 0:
            continue # only train after warmup period and at regular intervals
        # Sample memories and use the target DQN to produce the target Q-Value
X_state_val, X_action_val, rewards, X_next_state_val, continues = (
sample_memories(batch_size))
next_q_values = target_q_values.eval(
feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
        # Train the online DQN
_, loss_val = sess.run([training_op, loss], feed_dict={
X_state: X_state_val, X_action: X_action_val, y: y_val})
        # Regularly copy the online DQN to the target DQN
if step % copy_steps == 0:
copy_online_to_target.run()
        # And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
INFO:tensorflow:Restoring parameters from ./my_dqn.ckpt
Iteration 13992	Training step 3999999/4000000 (100.0)%	Loss 0.765749	Mean Max-Q 221.037805
###Markdown
You can interrupt the cell above at any time to test your agent using the cell below. You can then run the cell above once again: it will load the last parameters saved and resume training.
###Code
frames = []
n_max_steps = 10000
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
obs = env.reset()
for step in range(n_max_steps):
state = preprocess_observation(obs)
        # Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = np.argmax(q_values)
        # Online DQN plays
obs, reward, done, info = env.step(action)
img = env.render(mode="rgb_array")
frames.append(img)
if done:
break
video = plot_animation(frames, figsize=(5,6))
HTML(video.to_html5_video()) # convert the animation to an HTML5 video
###Output
_____no_output_____
###Markdown
Extra material Preprocessing for Breakout Here is a preprocessing function you can use to train a DQN for the Breakout-v0 Atari game:
###Code
def preprocess_observation(obs):
    img = obs[34:194:2, ::2] # crop and downsize
return np.mean(img, axis=2).reshape(80, 80) / 255.0
env = gym.make("Breakout-v0")
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
img = preprocess_observation(obs)
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (80×80 grayscale)")
plt.imshow(img, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, a single image does not tell you in which direction the ball is moving, or how fast, and this information is crucial for this game. For this reason, it is best to combine several consecutive observations to create the environment's state representation. One way to do that is to create a multi-channel image, with one channel per recent observation. Another is to merge all recent observations into a single-channel image, using `np.max()`. In this case, we dim the older images so that the DQN can distinguish the past from the present.
###Code
from collections import deque
def combine_observations_multichannel(preprocessed_observations):
return np.array(preprocessed_observations).transpose([1, 2, 0])
def combine_observations_singlechannel(preprocessed_observations, dim_factor=0.5):
dimmed_observations = [obs * dim_factor**index
for index, obs in enumerate(reversed(preprocessed_observations))]
return np.max(np.array(dimmed_observations), axis=0)
n_observations_per_state = 3
preprocessed_observations = deque([], maxlen=n_observations_per_state)
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
preprocessed_observations.append(preprocess_observation(obs))
img1 = combine_observations_multichannel(preprocessed_observations)
img2 = combine_observations_singlechannel(preprocessed_observations)
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.title("Multichannel state")
plt.imshow(img1, interpolation="nearest")
plt.axis("off")
plt.subplot(122)
plt.title("Singlechannel state")
plt.imshow(img2, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Exercise solutions 1. to 7. See Appendix A. 8. BipedalWalker-v2 Exercise: _Use policy gradients to tackle OpenAI gym's "BipedalWalker-v2"._
###Code
import gym
env = gym.make("BipedalWalker-v2")
###Output
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
###Markdown
Note: if you run into [this issue](https://github.com/openai/gym/issues/100) ("`module 'Box2D._Box2D' has no attribute 'RAND_LIMIT'`") when making the `BipedalWalker-v2` environment, then try this workaround:```$ pip uninstall Box2D-kengz$ pip install git+https://github.com/pybox2d/pybox2d```
###Code
obs = env.reset()
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
obs
###Output
_____no_output_____
###Markdown
You can find the meaning of each of these 24 numbers in the [documentation](https://github.com/openai/gym/wiki/BipedalWalker-v2).
###Code
env.action_space
env.action_space.low
env.action_space.high
###Output
_____no_output_____
###Markdown
This is a 4D continuous action space controlling each leg's hip torque and knee torque (from -1 to 1). One way to deal with a continuous action space is to discretize it. For example, let's limit the possible torque values to these 3 values: -1.0, 0.0, and 1.0. This means that we are left with $3^4=81$ possible actions.
###Code
from itertools import product
possible_torques = np.array([-1.0, 0.0, 1.0])
possible_actions = np.array(list(product(possible_torques, possible_torques, possible_torques, possible_torques)))
possible_actions.shape
tf.reset_default_graph()
# 1. Specify the network architecture
n_inputs = env.observation_space.shape[0] # == 24
n_hidden = 10
n_outputs = len(possible_actions) # == 81
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.selu,
kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
outputs = tf.nn.softmax(logits)
# 3. Select a random action based on the estimated probabilities
action_index = tf.squeeze(tf.multinomial(logits, num_samples=1), axis=-1)
# 4. Training
learning_rate = 0.01
y = tf.one_hot(action_index, depth=len(possible_actions))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Let's try running this policy network, even though it is not trained yet.
###Code
def run_bipedal_walker(model_path=None, n_max_steps = 1000):
env = gym.make("BipedalWalker-v2")
frames = []
with tf.Session() as sess:
if model_path is None:
init.run()
else:
saver.restore(sess, model_path)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_index_val = action_index.eval(feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
if done:
break
env.close()
return frames
frames = run_bipedal_walker()
video = plot_animation(frames)
HTML(video.to_html5_video())
###Output
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
###Markdown
Nope, it really can't walk. So let's train it!
###Code
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 1000
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}/{}".format(iteration + 1, n_iterations), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_index_val, gradients_val = sess.run([action_index, gradients],
feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_bipedal_walker_pg.ckpt")
frames = run_bipedal_walker("./my_bipedal_walker_pg.ckpt")
video = plot_animation(frames)
HTML(video.to_html5_video())
###Output
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
INFO:tensorflow:Restoring parameters from ./my_bipedal_walker_pg.ckpt
###Markdown
**16장 – 강화 학습** _이 노트북은 15장에 있는 모든 샘플 코드와 연습문제 해답을 가지고 있습니다._ 설정 파이썬 2와 3을 모두 지원합니다. 공통 모듈을 임포트하고 맷플롯립 그림이 노트북 안에 포함되도록 설정하고 생성한 그림을 저장하기 위한 함수를 준비합니다:
###Code
# 파이썬 2와 파이썬 3 지원
from __future__ import division, print_function, unicode_literals
# 공통
import numpy as np
import os
import sys
# 일관된 출력을 위해 유사난수 초기화
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# 맷플롯립 설정
from IPython.display import HTML
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# 한글출력
plt.rcParams['font.family'] = 'NanumBarunGothic'
plt.rcParams['axes.unicode_minus'] = False
# 그림을 저장할 폴더
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
OpenAI 짐(gym) 이 노트북에서는 강화 학습 알고리즘을 개발하고 비교할 수 있는 훌륭한 도구인 [OpenAI 짐(gym)](https://gym.openai.com/)을 사용합니다. 짐은 *에이전트*가 학습할 수 있는 많은 환경을 제공합니다. `gym`을 임포트해 보죠:
###Code
import gym
###Output
_____no_output_____
###Markdown
그다음 MsPacman 환경 버전 0을 로드합니다.
###Code
env = gym.make('MsPacman-v0')
###Output
_____no_output_____
###Markdown
`reset()` 메서드를 호출하여 환경을 초기화합니다. 이 메서드는 하나의 관측을 반환합니다:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
관측은 환경마다 다릅니다. 여기에서는 [width, height, channels] 크기의 3D 넘파이 배열로 저장되어 있는 RGB 이미지입니다(채널은 3개로 빨강, 초록, 파랑입니다). 잠시 후에 보겠지만 다른 환경에서는 다른 오브젝트가 반환될 수 있습니다.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
환경은 `render()` 메서드를 사용하여 화면에 나타낼 수 있고 렌더링 모드를 고를 수 있습니다(렌더링 옵션은 환경마다 다릅니다). 이 경우에는 `mode="rgb_array"`로 지정해서 넘파이 배열로 환경에 대한 이미지를 받겠습니다:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
이미지를 그려보죠:
###Code
plt.figure(figsize=(5,6))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
1980년대로 돌아오신 걸 환영합니다! :) 이 환경에서는 렌더링된 이미지가 관측과 동일합니다(하지만 많은 경우에 그렇지 않습니다):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
환경을 그리기 위한 유틸리티 함수를 만들겠습니다:
###Code
def plot_environment(env, figsize=(5,6)):
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
환경을 어떻게 다루는지 보겠습니다. 에이전트는 "행동 공간"(가능한 행동의 모음)에서 하나의 행동을 선택합니다. 이 환경의 액션 공간을 다음과 같습니다:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)`는 가능한 행동이 정수 0에서부터 8까지있다는 의미입니다. 이는 조이스틱의 9개의 위치(0=중앙, 1=위, 2=오른쪽, 3=왼쪽, 4=아래, 5=오른쪽위, 6=왼쪽위, 7=오른쪽아래, 8=왼쪽아래)에 해당합니다. 그다음 환경에게 플레이할 행동을 알려주고 게임의 다음 단계를 진행시킵니다. 왼쪽으로 110번을 진행하고 왼쪽아래로 40번을 진행해 보겠습니다:
###Code
env.reset()
for step in range(110):
env.step(3) #왼쪽
for step in range(40):
env.step(8) #왼쪽아래
###Output
_____no_output_____
###Markdown
어디에 있을까요?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
사실 `step()` 함수는 여러 개의 중요한 객체를 반환해 줍니다:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
앞서 본 것처럼 관측은 보이는 환경을 설명합니다. 여기서는 210x160 RGB 이미지입니다:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is a dictionary that can provide extra information about the internal state of the environment. This is useful for debugging, but the agent should not use this information for learning (that would be cheating, not learning).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), by picking a random direction every 10 steps and recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now let's show the animation:
###Code
def update_scene(num, frames, patch):
    plt.close() # close the previous figure; otherwise matplotlib shows two figures (looks like a matplotlib bug)
patch.set_data(frames[num])
return patch,
def plot_animation(frames, figsize=(5,6), repeat=False, interval=40):
fig = plt.figure(figsize=figsize)
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch),
frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
HTML(video.to_html5_video()) # render as an HTML5 video
###Output
_____no_output_____
###Markdown
Once you have finished using an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To train our first agent, we will use the simple Cart-Pole environment. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole standing vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. To render this environment we first need to solve a couple of issues. Fixing the rendering issue Some environments (including Cart-Pole) require access to your display to open a separate window, even when you specify `mode="rgb_array"`. In general you can safely ignore that window. However, if Jupyter is running on a headless server (i.e. without a screen), it will raise an exception. One way to avoid this is to install a fake X server like Xvfb and start Jupyter using the `xvfb-run` command: $ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook If Jupyter is running on a headless server but you don't want to bother installing Xvfb, you can use the following rendering function for Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
from pyglet.gl import gl_info
    openai_cart_pole_rendering = True # no problem, let's use OpenAI gym's rendering function
except Exception:
    openai_cart_pole_rendering = False # probably no X server available, let's use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
        # use OpenAI gym's rendering function
return env.render(mode="rgb_array")
else:
        # rendering for the Cart-Pole environment (in case OpenAI gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
        cart_col = 0x000000 # Blue Green Red
        pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
plt.show()
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
img.shape
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to the right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like the same situation as before. How can we make the pole remain upright? We will need to define a *policy* for that: the strategy the agent will use to select an action at each step. It can use past actions and observations to decide what to do. A simple hard-coded policy Let's hard-code a simple policy: if the pole is tilting to the left, push the cart to the left, and vice versa. Let's see if that works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # render as an HTML5 video
###Output
_____no_output_____
###Markdown
Nope, the system is unstable, and after just a few wobbles the pole ends up too tilted: game over. We will need a smarter policy! Neural Network Policies Let's create a neural network that takes observations as inputs and outputs the action to take for each observation. To choose an action, the network first estimates a probability for each action, then selects an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of action 0 (left), and the probability of action 1 (right) will be `1 - p`.
###Code
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more neurons than this
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
###Output
_____no_output_____
###Markdown
In this particular environment, past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state, then you might need to consider past actions and observations to try to infer it. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous one in order to estimate the current velocity. The same applies when the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we pick a random action based on the probabilities given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between *exploring* new actions and *exploiting* the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing, so you randomly pick one. If it turns out to be good, you can increase the probability of ordering it next time, but you shouldn't increase that probability to 100%, or else you will never try the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy network and use it to play one game:
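As a tiny illustration of this sampling step (an assumed example added here, not part of the original notebook), suppose the network outputs `p = 0.7` for action 0: sampling still picks action 1 about 30% of the time, so the less likely action keeps being explored:
```python
import numpy as np

p_left = 0.7  # assumed probability of action 0 (left) from the policy network
actions = (np.random.rand(10000) > p_left).astype(int)  # 0 = left, 1 = right
print(actions.mean())  # ~0.3: action 1 is still tried about 30% of the time
```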
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Let's see how well this randomly initialized policy network performs:
###Code
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # render as an HTML5 video
###Output
_____no_output_____
###Markdown
Hmm... not great. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but adds the target probabilities `y` and the training operations (`cross_entropy`, `optimizer`, and `training_op`):
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We can make the same network play in 10 different environments in parallel and train it for 1,000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
        target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., otherwise proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
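    # Roll out one CartPole episode with the saved policy network, recording a frame per step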
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # render as an HTML5 video
###Output
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
INFO:tensorflow:Restoring parameters from ./my_policy_net_basic.ckpt
###Markdown
It looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients To train this neural network we need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects: when you win or lose points in a game, it is not clear which actions contributed to this result. Was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the *credit assignment problem*. The *Policy Gradients* algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
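To make the "go back and think" step concrete, here is the discounting rule that `discount_rewards()` below implements, worked out on the example used later in this cell: each reward is replaced by the discounted sum of the rewards that follow it, $R_t = \sum_{k \ge 0} \gamma^k \, r_{t+k}$. With rewards $[10, 0, -50]$ and $\gamma = 0.8$, this gives $R_0 = 10 + 0.8 \cdot 0 + 0.8^2 \cdot (-50) = -22$, $R_1 = 0 + 0.8 \cdot (-50) = -40$, and $R_2 = -50$.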
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
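    # Walk backwards through the episode, accumulating the discounted sum of future rewards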
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
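    # Discount each episode's rewards, then normalize across all episodes so good actions tend to get positive scores and bad ones negative scores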
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
        print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # render as an HTML5 video
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
INFO:tensorflow:Restoring parameters from ./my_policy_net_pg.ckpt
###Markdown
Markov Chains
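As a quick reminder (a note added here), a Markov chain has the property that the next state depends only on the current state, not on the full history: $P(s_{t+1} \mid s_t, s_{t-1}, \dots, s_0) = P(s_{t+1} \mid s_t)$. The code below samples trajectories from a chain with four states, where $s_3$ is absorbing and treated as terminal.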
###Code
transition_probabilities = [
    [0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
    [0.0, 0.0, 0.9, 0.1], # from s1 to ...
    [0.0, 1.0, 0.0, 0.0], # from s2 to ...
    [0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
    print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = np.random.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process
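The code below defines a small MDP via transition probabilities $T(s, a, s')$ and rewards $R(s, a, s')$. For reference (a note added here), the optimal Q-Values of an MDP satisfy the Bellman optimality equation, which the Q-Learning section below approximates from samples: $Q^*(s, a) = \sum_{s'} T(s, a, s') \left[ R(s, a, s') + \gamma \cdot \max_{a'} Q^*(s', a') \right]$.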
###Code
transition_probabilities = [
    [[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 is chosen, go to s0 with proba 0.7, to s1 with proba 0.3, and so on.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return np.random.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = np.random.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
        print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
        print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
    print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate Q-Value estimates (or close enough), then the optimal policy is simply to choose the action that has the highest Q-Value (i.e., the greedy policy).
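Written out explicitly (for reference), the update performed at each step of the code below, with learning rate $\alpha$ and discount rate $\gamma$, is: $Q(s, a) \leftarrow (1 - \alpha) \, Q(s, a) + \alpha \left( r + \gamma \cdot \max_{a'} Q(s', a') \right)$.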
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
q_values[state][actions]=0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
    next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to play MsPacman using the DQN algorithm Creating the MsPacman environment
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
mspacman_color = 210 + 164 + 74
def preprocess_observation(obs):
    img = obs[1:176:2, ::2] # crop and downsize
    img = img.sum(axis=2) # convert to greyscale
    img[img==mspacman_color] = 0 # improve contrast
    img = (img // 3 - 128).astype(np.int8) # normalize from -128 to 127
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
###Output
_____no_output_____
###Markdown
Note: the `preprocess_observation()` function is slightly different from the one in the book: instead of representing pixels as 64-bit floats from -1.0 to 1.0, it represents them as signed bytes (from -128 to 127). The benefit is that the replay memory will take up roughly 8 times less RAM (about 6.5 GB instead of 52 GB). The reduced precision has no visible impact on training.
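A rough back-of-the-envelope check of that claim (our own approximate numbers; the two stored frames dominate each replay memory entry):
```python
frame_bytes = 88 * 80 * 1         # one preprocessed frame stored as int8
per_memory = 2 * frame_bytes      # each entry stores state and next_state
total_int8 = 500000 * per_memory  # replay_memory_size used below
print(total_int8 / 1e9)           # ~7.0 GB as signed bytes
print(total_int8 * 8 / 1e9)       # ~56 GB as 64-bit floats: 8x larger
```
The exact figures differ slightly from the note because entries also store actions, rewards, and the continue flag, but the 8x ratio holds either way.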
###Code
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 grayscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Building the DQN
###Code
reset_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden_in = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n # 9 discrete actions are available
initializer = tf.variance_scaling_initializer()
def q_network(X_state, name):
    prev_layer = X_state / 128.0 # scale pixel intensities to the [-1.0, 1.0] range
with tf.variable_scope(name) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(
conv_n_maps, conv_kernel_sizes, conv_strides,
conv_paddings, conv_activation):
prev_layer = tf.layers.conv2d(
prev_layer, filters=n_maps, kernel_size=kernel_size,
strides=strides, padding=padding, activation=activation,
kernel_initializer=initializer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in])
hidden = tf.layers.dense(last_conv_layer_flat, n_hidden,
activation=hidden_activation,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width,
input_channels])
online_q_values, online_vars = q_network(X_state, name="q_networks/online")
target_q_values, target_vars = q_network(X_state, name="q_networks/target")
copy_ops = [target_var.assign(online_vars[var_name])
for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
online_vars
learning_rate = 0.001
momentum = 0.95
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keepdims=True)
error = tf.abs(y - q_value)
clipped_error = tf.clip_by_value(error, 0.0, 1.0)
linear_error = 2 * (error - clipped_error)
loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Note: when the book was first written, the squared error between the target Q-Values (y) and the predicted Q-Values (q_value) was used. However, because the experiences are very noisy, it is better to penalize small errors (below 1.0) quadratically and large errors linearly (twice the absolute error), as computed above. This way, large errors cannot push the model parameters around too much. A few hyperparameters were also tweaked: a smaller learning rate, and Nesterov Accelerated Gradients instead of Adam optimization, since adaptive gradient descent algorithms can sometimes perform poorly according to the literature. A few other hyperparameters were tweaked below as well: a larger replay memory, more decay steps for the e-greedy policy, a larger discount rate, and less frequent copies of the online DQN to the target DQN.
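A minimal NumPy sketch (added here, not from the book) of how this loss behaves: quadratic up to an error of 1.0, then linear with slope 2, so the two pieces join smoothly at 1.0 (a scaled Huber loss):
```python
import numpy as np

errors = np.array([0.1, 0.5, 1.0, 3.0, 10.0])
clipped = np.clip(errors, 0.0, 1.0)         # same idea as tf.clip_by_value above
loss = clipped**2 + 2 * (errors - clipped)  # quadratic below 1.0, linear beyond
print(loss)  # [ 0.01  0.25  1.    5.   19. ], e.g. 3.0 -> 1 + 2*(3-1) = 5
```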
###Code
from collections import deque
replay_memory_size = 500000
replay_memory = deque([], maxlen=replay_memory_size)
def sample_memories(batch_size):
indices = np.random.permutation(len(replay_memory))[:batch_size]
    cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for idx in indices:
memory = replay_memory[idx]
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
###Output
_____no_output_____
###Markdown
Approach using a ReplayMemory class ================== We use a ReplayMemory class instead of a deque because random access is much faster (thanks to @NileshPS for contributing it). Also, sampling with replacement (the default) is much faster than sampling without replacement from a large replay memory.
###Code
class ReplayMemory:
def __init__(self, maxlen):
self.maxlen = maxlen
self.buf = np.empty(shape=maxlen, dtype=np.object)
self.index = 0
self.length = 0
def append(self, data):
self.buf[self.index] = data
self.length = min(self.length + 1, self.maxlen)
self.index = (self.index + 1) % self.maxlen
def sample(self, batch_size, with_replacement=True):
if with_replacement:
            indices = np.random.randint(self.length, size=batch_size) # faster
else:
indices = np.random.permutation(self.length)[:batch_size]
return self.buf[indices]
replay_memory_size = 500000
replay_memory = ReplayMemory(replay_memory_size)
def sample_memories(batch_size):
    cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for memory in replay_memory.sample(batch_size):
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
###Output
_____no_output_____
###Markdown
=============================================
###Code
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 2000000
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if np.random.rand() < epsilon:
        return np.random.randint(n_outputs) # random action
else:
        return np.argmax(q_values) # optimal action
n_steps = 4000000 # total number of training steps
training_start = 10000 # start training after 10,000 game iterations
training_interval = 4 # run a training step every 4 game iterations
save_steps = 1000 # save the model every 1,000 training steps
copy_steps = 10000 # copy the online DQN to the target DQN every 10,000 training steps
discount_rate = 0.99
skip_start = 90 # skip the start of every game (it's just waiting time)
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "./my_dqn.ckpt"
done = True # env needs to be reset
###Output
_____no_output_____
###Markdown
We need a few variables to track the learning progress:
###Code
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
###Output
_____no_output_____
###Markdown
And now the main training loop!
###Code
with tf.Session() as sess:
if os.path.isfile(checkpoint_path + ".index"):
saver.restore(sess, checkpoint_path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
        print("\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}\tMean Max-Q {:5f} ".format(
            iteration, step, n_steps, step * 100 / n_steps, loss_val, mean_max_q), end="")
        if done: # game over, start again
obs = env.reset()
            for skip in range(skip_start): # skip the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
        # Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
        # Online DQN plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
        # Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
        # Compute statistics for tracking progress (not shown in the book)
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
if iteration < training_start or iteration % training_interval != 0:
            continue # only train after the warmup period and at regular intervals
        # Sample memories and use the target DQN to produce the target Q-Values
X_state_val, X_action_val, rewards, X_next_state_val, continues = (
sample_memories(batch_size))
next_q_values = target_q_values.eval(
feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
        # Train the online DQN
_, loss_val = sess.run([training_op, loss], feed_dict={
X_state: X_state_val, X_action: X_action_val, y: y_val})
        # Regularly copy the online DQN to the target DQN
if step % copy_steps == 0:
copy_online_to_target.run()
        # And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
INFO:tensorflow:Restoring parameters from ./my_dqn.ckpt
Iteration 13992 Training step 3999999/4000000 (100.0)% Loss 1.095662 Mean Max-Q 221.055817
###Markdown
You can interrupt the cell above at any time to test your agent using the cell below. You can then run the cell above once again: it will load the last parameters saved and resume training.
###Code
frames = []
n_max_steps = 10000
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
obs = env.reset()
for step in range(n_max_steps):
state = preprocess_observation(obs)
        # Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = np.argmax(q_values)
        # Online DQN plays
obs, reward, done, info = env.step(action)
img = env.render(mode="rgb_array")
frames.append(img)
if done:
break
video = plot_animation(frames, figsize=(5,6))
HTML(video.to_html5_video()) # render as an HTML5 video
###Output
_____no_output_____
###Markdown
Extra material Preprocessing for Breakout Here is a preprocessing function you can use to train a DQN for the Breakout-v0 Atari game:
###Code
def preprocess_observation(obs):
    img = obs[34:194:2, ::2] # crop and downsize
return np.mean(img, axis=2).reshape(80, 80) / 255.0
env = gym.make("Breakout-v0")
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
img = preprocess_observation(obs)
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (80×80 grayscale)")
plt.imshow(img, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, a single image does not tell you the direction and speed of the ball, which are crucial for this game. For this reason, it is best to combine several consecutive observations to represent the state of the environment. One way to do that is to create a multi-channel image, with one channel per recent observation. Another is to merge all recent observations into a single-channel image, using `np.max()`. In this case, we dim older images so that the DQN can distinguish the present from the past.
###Code
from collections import deque
def combine_observations_multichannel(preprocessed_observations):
return np.array(preprocessed_observations).transpose([1, 2, 0])
def combine_observations_singlechannel(preprocessed_observations, dim_factor=0.5):
dimmed_observations = [obs * dim_factor**index
for index, obs in enumerate(reversed(preprocessed_observations))]
return np.max(np.array(dimmed_observations), axis=0)
n_observations_per_state = 3
preprocessed_observations = deque([], maxlen=n_observations_per_state)
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
preprocessed_observations.append(preprocess_observation(obs))
img1 = combine_observations_multichannel(preprocessed_observations)
img2 = combine_observations_singlechannel(preprocessed_observations)
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.title("Multichannel state")
plt.imshow(img1, interpolation="nearest")
plt.axis("off")
plt.subplot(122)
plt.title("Singlechannel state")
plt.imshow(img2, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Exercise solutions 1. to 7. See Appendix A. 8. BipedalWalker-v2 *Exercise: Use policy gradients to train an agent on OpenAI gym's "BipedalWalker-v2".*
###Code
import gym
env = gym.make("BipedalWalker-v2")
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
###Markdown
Note: if you encounter an issue like "`module 'Box2D._Box2D' has no attribute 'RAND_LIMIT'`" when creating the `BipedalWalker-v2` environment, try the following workaround:```$ pip uninstall Box2D-kengz$ pip install git+https://github.com/pybox2d/pybox2d```
###Code
obs = env.reset()
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
obs
###Output
_____no_output_____
###Markdown
See the [online documentation](https://github.com/openai/gym/wiki/BipedalWalker-v2) for the meaning of each of these 24 numbers.
###Code
env.action_space
env.action_space.low
env.action_space.high
###Output
_____no_output_____
###Markdown
This is a 4D continuous action space controlling the torque of each leg's hip joint and knee joint (from -1 to 1). One way to deal with a continuous action space is to discretize it. For example, let's limit the possible torque values to these 3 values: -1.0, 0.0, and 1.0. This gives $3^4=81$ possible actions.
###Code
from itertools import product
possible_torques = np.array([-1.0, 0.0, 1.0])
possible_actions = np.array(list(product(possible_torques, possible_torques, possible_torques, possible_torques)))
possible_actions.shape
tf.reset_default_graph()
# 1. Specify the network architecture
n_inputs = env.observation_space.shape[0] # == 24
n_hidden = 10
n_outputs = len(possible_actions) # == 81
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.selu,
kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
outputs = tf.nn.softmax(logits)
# 3. Select a random action based on the estimated probabilities
action_index = tf.squeeze(tf.multinomial(logits, num_samples=1), axis=-1)
# 4. Training
learning_rate = 0.01
y = tf.one_hot(action_index, depth=len(possible_actions))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Let's run this policy network, even though it is not trained yet.
###Code
def run_bipedal_walker(model_path=None, n_max_steps = 1000):
env = gym.make("BipedalWalker-v2")
frames = []
with tf.Session() as sess:
if model_path is None:
init.run()
else:
saver.restore(sess, model_path)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_index_val = action_index.eval(feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
if done:
break
env.close()
return frames
frames = run_bipedal_walker()
video = plot_animation(frames)
HTML(video.to_html5_video())
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
###Markdown
Nope, it can't walk. So let's train it!
###Code
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 1000
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}/{}".format(iteration + 1, n_iterations), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_index_val, gradients_val = sess.run([action_index, gradients],
feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_bipedal_walker_pg.ckpt")
frames = run_bipedal_walker("./my_bipedal_walker_pg.ckpt")
video = plot_animation(frames)
HTML(video.to_html5_video())
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
INFO:tensorflow:Restoring parameters from ./my_bipedal_walker_pg.ckpt
###Markdown
**Chapter 16 – Reinforcement Learning** This notebook contains all the sample code and solutions to the exercises in chapter 16. Run in Google Colab **Warning**: this is the code for the 1st edition of the book. Please visit https://github.com/ageron/handson-ml2 for the 2nd edition code, with up-to-date notebooks using the latest library versions. In particular, the 1st edition is based on TensorFlow 1, while the 2nd edition uses TensorFlow 2, which is much simpler to use. Setup First, let's make sure this notebook works well in both Python 2 and 3, import a few common modules, ensure matplotlib plots figures inline and prepare a function to save the figures:
###Code
import numpy as np
import os
import sklearn
import sys
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 1.x
!apt update && apt install -y libpq-dev libsdl2-dev swig xorg-dev xvfb
!pip install -q -U pyvirtualdisplay gym[atari,box2d]
IS_COLAB = True
except Exception:
IS_COLAB = False
# to make this notebook's output stable across runs
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# To get smooth animations
import matplotlib.animation as animation
mpl.rc('animation', html='jshtml')
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
###Output
_____no_output_____
###Markdown
Note: there may be minor differences between the output of this notebook and the examples shown in the book. You can safely ignore these differences. They are mainly due to the fact that most of the environments provided by OpenAI gym have some randomness. Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Next we will load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
_____no_output_____
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
env.seed(42)
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [width, height, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). **Warning**: some environments require access to your display, which opens up a separate window, even if you specify `mode="rgb_array"`. In general you can safely ignore that window. However, if Jupyter is running on a headless server (ie. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like [Xvfb](http://en.wikipedia.org/wiki/Xvfb). On Debian or Ubuntu:```bash$ apt update$ apt install -y xvfb```You can then start Jupyter using the `xvfb-run` command:```bash$ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook```Alternatively, you can install the [pyvirtualdisplay](https://github.com/ponty/pyvirtualdisplay) Python library which wraps Xvfb:```bashpython3 -m pip install -U pyvirtualdisplay```And run the following code:
###Code
try:
import pyvirtualdisplay
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
except ImportError:
pass
env.render()
###Output
_____no_output_____
###Markdown
In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
img.shape
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,4))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
Saving figure MsPacman
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little helper function to plot an environment:
###Code
def plot_environment(env, figsize=(5,4)):
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represents the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower left for 40 steps:
###Code
env.seed(42)
env.reset()
for step in range(110):
env.step(3) #left
for step in range(40):
env.step(8) #lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
plt.show()
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), by moving in random directions for 10 steps at a time, recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
env.seed(42)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now show the animation:
###Code
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
anim = animation.FuncAnimation(
fig, update_scene, fargs=(frames, patch),
frames=len(frames), repeat=repeat, interval=interval)
plt.close()
return anim
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To code our first learning agent, we will be using a simpler environment: the Cart-Pole. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
env.seed(42)
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment...
###Code
plot_environment(env)
plt.show()
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
env.seed(42)
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
plot_environment(env)
save_fig("cart_pole_plot")
img.shape
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_environment(env)
plt.show()
###Output
_____no_output_____
###Markdown
Looks like it's doing what we're telling it to do. Now how can we make the pole remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
env.seed(42)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! Neural Network Policies Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`. Note: instead of using the `fully_connected()` function from the `tensorflow.contrib.layers` module (as in the book), we now use the `dense()` function from the `tf.layers` module, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same.The main differences relevant to this chapter are:* the `_fn` suffix was removed in all the parameters that had it (for example the `activation_fn` parameter was renamed to `activation`).* the `weights` parameter was renamed to `kernel`,* the default activation is `None` instead of `tf.nn.relu`
###Code
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more than this
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
###Output
WARNING:tensorflow:From <ipython-input-36-e360db0650cb>:12: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.
Instructions for updating:
Use keras.layers.Dense instead.
WARNING:tensorflow:From /Users/ageron/miniconda3/envs/tf1/lib/python3.7/site-packages/tensorflow_core/python/layers/core.py:187: Layer.apply (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.
Instructions for updating:
Please use `layer.__call__` method instead.
WARNING:tensorflow:From <ipython-input-36-e360db0650cb>:18: multinomial (from tensorflow.python.ops.random_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.random.categorical` instead.
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
env.seed(42)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performed:
###Code
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`):
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
WARNING:tensorflow:From /Users/ageron/miniconda3/envs/tf1/lib/python3.7/site-packages/tensorflow_core/python/ops/nn_impl.py:183: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
###Markdown
We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps=1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_.The _Policy Gradients_ algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
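# Expected results (worked by hand): discount_rewards([10, 0, -50], 0.8) gives
# [-22., -40., -50.], since -50; then 0 + 0.8 * (-50) = -40; then 10 + 0.8 * (-40) = -22.
# discount_and_normalize_rewards then standardizes all scores using the mean and std
# computed across both games, so actions from the better game get positive scores.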
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Markov Chains
###Code
transition_probabilities = [
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to ...
[0.0, 1.0, 0.0, 0.0], # from s2 to ...
[0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = np.random.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process
###Code
transition_probabilities = [
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return np.random.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = np.random.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate Q-Value estimates (or close enough), the optimal policy is to choose the action that has the highest Q-Value (i.e., the greedy policy).
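Concretely, the code below applies the standard TD Q-Learning update at every step: $Q(s,a) \leftarrow (1-\alpha) \, Q(s,a) + \alpha \left( r + \gamma \cdot \underset{a'}{\max} \, Q(s',a') \right)$ where $\alpha$ is the learning rate and $\gamma$ is the discount rate.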
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
q_values[state][actions]=0
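# Impossible actions keep a Q-Value of -inf, so the max/argmax below never picks them.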
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to Play MsPacman Using the DQN Algorithm **Warning**: Unfortunately, the first version of the book contained two important errors in this section:

1. The actor DQN and critic DQN should have been named _online DQN_ and _target DQN_ respectively. Actor-critic algorithms are a distinct class of algorithms.
2. The online DQN is the one that learns and is copied to the target DQN at regular intervals. The target DQN's only role is to estimate the next state's Q-Values for each possible action. This is needed to compute the target Q-Values for training the online DQN, as shown in this equation:

$y(s,a) = r + \gamma \cdot \underset{a'}{\max} \, Q_\text{target}(s', a')$

* $y(s,a)$ is the target Q-Value to train the online DQN for the state-action pair $(s, a)$.
* $r$ is the reward actually collected after playing action $a$ in state $s$.
* $\gamma$ is the discount rate.
* $s'$ is the state actually reached after playing action $a$ in state $s$.
* $a'$ is one of the possible actions in state $s'$.
* $Q_\text{target}(s', a')$ is the target DQN's estimate of the Q-Value of playing action $a'$ while in state $s'$.

I hope these errors did not affect you, and if they did, I sincerely apologize. Creating the MsPacman environment
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
mspacman_color = 210 + 164 + 74
def preprocess_observation(obs):
img = obs[1:176:2, ::2] # crop and downsize
img = img.sum(axis=2) # to greyscale
img[img==mspacman_color] = 0 # Improve contrast
img = (img // 3 - 128).astype(np.int8) # normalize from -128 to 127
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
###Output
_____no_output_____
###Markdown
Note: the `preprocess_observation()` function is slightly different from the one in the book: instead of representing pixels as 64-bit floats from -1.0 to 1.0, it represents them as signed bytes (from -128 to 127). The benefit is that the replay memory will take up roughly 8 times less RAM (about 6.5 GB instead of 52 GB). The reduced precision has no visible impact on training.
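As a quick sanity check of that claim (a sketch: one preprocessed state is an 88×80×1 array):
###Code
state_bytes_int8 = 88 * 80 * 1   # np.int8: 1 byte per pixel
state_bytes_f64 = 88 * 80 * 8    # 64-bit float: 8 bytes per pixel
state_bytes_f64 / state_bytes_int8   # the factor-of-8 RAM saving
###Output
_____no_output_____
###Markdown
Let's plot the original and preprocessed observations side by side: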
###Code
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Build DQN Note: instead of using `tf.contrib.layers.convolution2d()` or `tf.contrib.layers.conv2d()` (as in the first version of the book), we now use `tf.layers.conv2d()`, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same, except that the parameter names have changed slightly:

* the `num_outputs` parameter was renamed to `filters`,
* the `stride` parameter was renamed to `strides`,
* the `_fn` suffix was removed from parameter names that had it (e.g., `activation_fn` was renamed to `activation`),
* the `weights_initializer` parameter was renamed to `kernel_initializer`,
* the weights variable was renamed to `"kernel"` (instead of `"weights"`), and the biases variable was renamed from `"biases"` to `"bias"`,
* and the default `activation` is now `None` instead of `tf.nn.relu`.
###Code
reset_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden_in = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n # 9 discrete actions are available
initializer = tf.variance_scaling_initializer()
def q_network(X_state, name):
prev_layer = X_state / 128.0 # scale pixel intensities to the [-1.0, 1.0] range.
with tf.variable_scope(name) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(
conv_n_maps, conv_kernel_sizes, conv_strides,
conv_paddings, conv_activation):
prev_layer = tf.layers.conv2d(
prev_layer, filters=n_maps, kernel_size=kernel_size,
strides=strides, padding=padding, activation=activation,
kernel_initializer=initializer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in])
hidden = tf.layers.dense(last_conv_layer_flat, n_hidden,
activation=hidden_activation,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width,
input_channels])
online_q_values, online_vars = q_network(X_state, name="q_networks/online")
target_q_values, target_vars = q_network(X_state, name="q_networks/target")
copy_ops = [target_var.assign(online_vars[var_name])
for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
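# Running copy_online_to_target assigns every online variable's current value to
# the matching target variable, synchronizing the two DQNs.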
online_vars
learning_rate = 0.001
momentum = 0.95
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keepdims=True)
error = tf.abs(y - q_value)
clipped_error = tf.clip_by_value(error, 0.0, 1.0)
linear_error = 2 * (error - clipped_error)
loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Note: in the first version of the book, the loss function was simply the squared error between the target Q-Values (`y`) and the estimated Q-Values (`q_value`). However, because the experiences are very noisy, it is better to use a quadratic loss only for small errors (below 1.0) and a linear loss (twice the absolute error) for larger errors, which is what the code above computes. This way large errors don't push the model parameters around as much. Note that we also tweaked some hyperparameters (using a smaller learning rate, and using Nesterov Accelerated Gradients rather than Adam optimization, since adaptive gradient algorithms may sometimes be bad, according to this [paper](https://arxiv.org/abs/1705.08292)). We also tweaked a few other hyperparameters below (a larger replay memory, longer decay for the $\epsilon$-greedy policy, larger discount rate, less frequent copies of the online DQN to the target DQN, etc.). We use this `ReplayMemory` class instead of a `deque` because it is much faster for random access (thanks to @NileshPS who contributed it). Moreover, we default to sampling with replacement, which is much faster than sampling without replacement for large replay memories.
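Here is a quick standalone sketch of that loss (plain NumPy, just to make the quadratic-then-linear shape concrete):
###Code
import numpy as np
err = np.array([0.0, 0.5, 1.0, 2.0, 3.0])  # absolute errors |y - q_value|
clipped = np.clip(err, 0.0, 1.0)
loss = clipped**2 + 2 * (err - clipped)    # quadratic below 1.0, linear above
loss                                       # array([0., 0.25, 1., 3., 5.])
###Output
_____no_output_____
###Markdown
Now the `ReplayMemory` class: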
###Code
class ReplayMemory:
def __init__(self, maxlen):
self.maxlen = maxlen
self.buf = np.empty(shape=maxlen, dtype=np.object)
self.index = 0
self.length = 0
def append(self, data):
self.buf[self.index] = data
self.length = min(self.length + 1, self.maxlen)
self.index = (self.index + 1) % self.maxlen
def sample(self, batch_size, with_replacement=True):
if with_replacement:
indices = np.random.randint(self.length, size=batch_size) # faster
else:
indices = np.random.permutation(self.length)[:batch_size]
return self.buf[indices]
replay_memory_size = 500000
replay_memory = ReplayMemory(replay_memory_size)
def sample_memories(batch_size):
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for memory in replay_memory.sample(batch_size):
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 2000000
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if np.random.rand() < epsilon:
return np.random.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
n_steps = 4000000 # total number of training steps
training_start = 10000 # start training after 10,000 game iterations
training_interval = 4 # run a training step every 4 game iterations
save_steps = 1000 # save the model every 1,000 training steps
copy_steps = 10000 # copy online DQN to target DQN every 10,000 training steps
discount_rate = 0.99
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "./my_dqn.ckpt"
done = True # env needs to be reset
###Output
_____no_output_____
###Markdown
A few variables for tracking progress:
###Code
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
###Output
_____no_output_____
###Markdown
And now the main training loop!
###Code
with tf.Session() as sess:
if os.path.isfile(checkpoint_path + ".index"):
saver.restore(sess, checkpoint_path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print("\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}\tMean Max-Q {:5f} ".format(
iteration, step, n_steps, step * 100 / n_steps, loss_val, mean_max_q), end="")
if done: # game over, start again
obs = env.reset()
for skip in range(skip_start): # skip the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# Online DQN plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
# Compute statistics for tracking progress (not shown in the book)
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
if iteration < training_start or iteration % training_interval != 0:
continue # only train after warmup period and at regular intervals
# Sample memories and use the target DQN to produce the target Q-Value
X_state_val, X_action_val, rewards, X_next_state_val, continues = (
sample_memories(batch_size))
next_q_values = target_q_values.eval(
feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
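            # y_val implements y(s,a) = r + gamma * max_a' Q_target(s', a');
            # `continues` is 0.0 for terminal transitions, zeroing the bootstrap term.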
# Train the online DQN
_, loss_val = sess.run([training_op, loss], feed_dict={
X_state: X_state_val, X_action: X_action_val, y: y_val})
# Regularly copy the online DQN to the target DQN
if step % copy_steps == 0:
copy_online_to_target.run()
# And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
INFO:tensorflow:Restoring parameters from ./my_dqn.ckpt
###Markdown
You can interrupt the cell above at any time to test your agent using the cell below. You can then run the cell above once again; it will load the last parameters saved and resume training.
###Code
frames = []
n_max_steps = 10000
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
obs = env.reset()
for step in range(n_max_steps):
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = np.argmax(q_values)
# Online DQN plays
obs, reward, done, info = env.step(action)
img = env.render(mode="rgb_array")
frames.append(img)
if done:
break
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Extra material Preprocessing for Breakout Here is a preprocessing function you can use to train a DQN for the Breakout-v0 Atari game:
###Code
def preprocess_observation(obs):
img = obs[34:194:2, ::2] # crop and downsize
return np.mean(img, axis=2).reshape(80, 80) / 255.0
env = gym.make("Breakout-v0")
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (80×80 grayscale)")
plt.imshow(img, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, a single image does not give you the direction and speed of the ball, both of which are crucial for playing this game. For this reason, it is best to actually combine several consecutive observations to create the environment's state representation. One way to do that is to create a multi-channel image, with one channel per recent observation. Another is to merge all recent observations into a single-channel image, using `np.max()`. In this case, we need to dim the older images so that the DQN can distinguish the past from the present.
###Code
from collections import deque
def combine_observations_multichannel(preprocessed_observations):
return np.array(preprocessed_observations).transpose([1, 2, 0])
def combine_observations_singlechannel(preprocessed_observations, dim_factor=0.5):
dimmed_observations = [obs * dim_factor**index
for index, obs in enumerate(reversed(preprocessed_observations))]
return np.max(np.array(dimmed_observations), axis=0)
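# With dim_factor=0.5 and 3 observations, the newest frame keeps weight 1.0, the
# previous one 0.5 and the oldest 0.25, so motion leaves a fading trail.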
n_observations_per_state = 3
preprocessed_observations = deque([], maxlen=n_observations_per_state)
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
preprocessed_observations.append(preprocess_observation(obs))
img1 = combine_observations_multichannel(preprocessed_observations)
img2 = combine_observations_singlechannel(preprocessed_observations)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Multichannel state")
plt.imshow(img1, interpolation="nearest")
plt.axis("off")
plt.subplot(122)
plt.title("Singlechannel state")
plt.imshow(img2, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Exercise solutions 1. to 7. See Appendix A. 8. BipedalWalker-v3 Exercise: _Use policy gradients to tackle OpenAI gym's "BipedalWalker-v3"._
###Code
import gym
env = gym.make("BipedalWalker-v3")
obs = env.reset()
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
obs
###Output
_____no_output_____
###Markdown
You can find the meaning of each of these 24 numbers in the [documentation](https://github.com/openai/gym/wiki/BipedalWalker-v2).
###Code
env.action_space
env.action_space.low
env.action_space.high
###Output
_____no_output_____
###Markdown
This is a 4D continuous action space controlling each leg's hip torque and knee torque (from -1 to 1). To deal with a continuous action space, one method is to discretize it. For example, let's limit the possible torque values to these 3 values: -1.0, 0.0, and 1.0. This means that we are left with $3^4=81$ possible actions.
###Code
from itertools import product
possible_torques = np.array([-1.0, 0.0, 1.0])
possible_actions = np.array(list(product(possible_torques, possible_torques, possible_torques, possible_torques)))
possible_actions.shape
tf.reset_default_graph()
# 1. Specify the network architecture
n_inputs = env.observation_space.shape[0] # == 24
n_hidden = 10
n_outputs = len(possible_actions)  # == 3**4 == 81
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.selu,
kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
outputs = tf.nn.softmax(logits)
# 3. Select a random action based on the estimated probabilities
action_index = tf.squeeze(tf.multinomial(logits, num_samples=1), axis=-1)
# 4. Training
learning_rate = 0.01
y = tf.one_hot(action_index, depth=len(possible_actions))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Let's try running this policy network, although it is not trained yet.
###Code
def run_bipedal_walker(model_path=None, n_max_steps = 1000):
env = gym.make("BipedalWalker-v2")
frames = []
with tf.Session() as sess:
if model_path is None:
init.run()
else:
saver.restore(sess, model_path)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_index_val = action_index.eval(feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
if done:
break
env.close()
return frames
frames = run_bipedal_walker()
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Nope, it really can't walk. So let's train it!
###Code
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 1000
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}/{}".format(iteration + 1, n_iterations), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_index_val, gradients_val = sess.run([action_index, gradients],
feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_bipedal_walker_pg.ckpt")
frames = run_bipedal_walker("./my_bipedal_walker_pg.ckpt")
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Not the best walker, but at least it stays up and makes (slow) progress to the right. A better solution for this problem is to use an actor-critic algorithm, as it does not require discretizing the action space, and it converges much faster. Check out this nice [blog post](https://towardsdatascience.com/reinforcement-learning-w-keras-openai-actor-critic-models-f084612cfd69) by Yash Patel for more details. 9. Pong DQN Let's explore the `Pong-v0` OpenAI Gym environment.
###Code
import gym
env = gym.make('Pong-v0')
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
We see the observation space is a 210x160 RGB image. The action space is a `Discrete(6)` space with 6 different actions: actions 0 and 1 do nothing, actions 2 and 4 move the paddle up, and finally actions 3 and 5 move the paddle down. The paddle is free to move immediately but the ball does not appear until after 18 steps into the episode. Let's play a game with a completely random policy and plot the resulting animation.
###Code
# A helper function to run an episode of Pong. Its first argument should be a
# function which takes the observation of the environment and the current
# iteration and produces an action for the agent to take.
def run_episode(policy, n_max_steps=1000, frames_per_action=1):
obs = env.reset()
frames = []
for i in range(n_max_steps):
obs, reward, done, info = env.step(policy(obs, i))
frames.append(env.render(mode='rgb_array'))
if done:
break
return plot_animation(frames)
run_episode(lambda obs, i: np.random.randint(0, 6))  # sample uniformly over all 6 actions
###Output
_____no_output_____
###Markdown
The random policy does not fare very well. So let's try to use the DQN and see if we can do better. First let's write a preprocessing function to scale down the input state. Since a single observation does not tell us about the ball's velocity, we will also need to combine multiple observations into a single state. Below is the preprocessing code for this environment. The preprocessing algorithm is two-fold:

1. Convert the observation image to black and white, and scale it down to 80×80 pixels.
2. Combine 3 observations into a single state which depicts the velocity of the paddles and the ball.
###Code
green_paddle_color = (92, 186, 92)
red_paddle_color = (213, 130, 74)
background_color = (144, 72, 17)
ball_color = (236, 236, 236)
def preprocess_observation(obs):
img = obs[34:194:2, ::2].reshape(-1, 3)
tmp = np.full(shape=(80 * 80), fill_value=0.0, dtype=np.float32)
for i, c in enumerate(img):
c = tuple(c)
if c in {green_paddle_color, red_paddle_color, ball_color}:
tmp[i] = 1.0
else:
tmp[i] = 0.0
return tmp.reshape(80, 80)
obs = env.reset()
for _ in range(25):
obs, _, _, _ = env.step(0)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title('Original Observation (160 x 210 RGB)')
plt.imshow(obs)
plt.axis('off')
plt.subplot(122)
plt.title('Preprocessed Observation (80 x 80 Grayscale)')
plt.imshow(preprocess_observation(obs), interpolation='nearest', cmap='gray')
plt.axis('off')
plt.show()
def combine_observations(preprocess_observations, dim_factor=0.75):
dimmed = [obs * (dim_factor ** idx)
for idx, obs in enumerate(reversed(preprocess_observations))]
return np.max(np.array(dimmed), axis=0)
n_observations_per_state = 3
obs = env.reset()
for _ in range(20):
obs, _, _, _ = env.step(0)
preprocess_observations = []
for _ in range(n_observations_per_state):
obs, _, _, _ = env.step(2)
preprocess_observations.append(preprocess_observation(obs))
img = combine_observations(preprocess_observations)
plt.figure(figsize=(6, 6))
plt.title('Combined Observations as a Single State')
plt.imshow(img, interpolation='nearest', cmap='gray')
plt.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
Now we are going to build the DQN. Like the DQN for MsPacman, this model stacks 3 convolutional layers, then a hidden fully connected layer, and finally a fully connected layer with 6 output neurons, one per possible action.
###Code
reset_graph()
input_width = 80
input_height = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [9, 5, 3]
conv_kernel_strides = [4, 2, 1]
conv_paddings = ['VALID'] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden_in = 5 * 5 * 64
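# Shape check with VALID padding: 80 -> (80-9)//4+1 = 18 -> (18-5)//2+1 = 7 -> 7-3+1 = 5,
# so the last conv layer outputs 64 feature maps of 5x5.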
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n
he_init = tf.contrib.layers.variance_scaling_initializer()
###Output
_____no_output_____
###Markdown
This model will use two DQNs, an online DQN and a target DQN. The online DQN learns new parameters at each training step. The target DQN is used to compute the target Q-Values for the online DQN's loss function during training. The online DQN's parameters are copied to the target DQN at regular intervals.
###Code
def q_network(X_state, name):
prev_layer = X_state
with tf.variable_scope(name) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(
conv_n_maps, conv_kernel_sizes, conv_kernel_strides, conv_paddings,
conv_activation):
prev_layer = tf.layers.conv2d(prev_layer, filters=n_maps,
kernel_size=kernel_size,
strides=strides, padding=padding,
activation=activation,
kernel_initializer=he_init)
flattened = tf.reshape(prev_layer, [-1, n_hidden_in])
hidden = tf.layers.dense(flattened, n_hidden,
activation=hidden_activation,
kernel_initializer=he_init)
outputs = tf.layers.dense(hidden, n_outputs, kernel_initializer=he_init)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
# Starting the DQN definition.
X_state = tf.placeholder(tf.float32, shape=(None, input_height, input_width,
input_channels))
online_q_values, online_vars = q_network(X_state, 'q_networks/online')
target_q_values, target_vars = q_network(X_state, 'q_networks/target')
copy_ops = [var.assign(online_vars[name]) for name, var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
# Defining the training objective.
learning_rate = 1e-3
momentum = 0.95
with tf.variable_scope('training') as scope:
X_action = tf.placeholder(tf.int32, shape=(None,))
y = tf.placeholder(tf.float32, shape=(None, 1))
    q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
                            axis=1, keepdims=True)  # online Q-Value of the action taken
    error = tf.abs(y - q_value)
loss = tf.reduce_mean(tf.square(error))
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum,
use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
This model will sample past experiences from a _Replay Memory_; this will hopefully help the model learn which higher-level patterns to pay attention to in order to find the right action. It also reduces the chance that the model's behavior gets too correlated to its most recent experiences. The replay memory will store its data in the kernel's memory.
###Code
class ReplayMemory(object):
def __init__(self, maxlen):
self.maxlen = maxlen
self.buf = np.empty(shape=maxlen, dtype=np.object)
self.index = 0
self.length = 0
def append(self, data):
self.buf[self.index] = data
self.index += 1
self.index %= self.maxlen
self.length = min(self.length + 1, self.maxlen)
def sample(self, batch_size):
return self.buf[np.random.randint(self.length, size=batch_size)]
replay_size = 200000
replay_memory = ReplayMemory(replay_size)
def sample_memories(batch_size):
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for memory in replay_memory.sample(batch_size):
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], \
cols[4].reshape(-1, 1)
###Output
_____no_output_____
###Markdown
Now let's define the model's policy during training. Just like in `MsPacMan.ipynb`, we will use an $\varepsilon$-greedy policy.
###Code
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 6000000
def epsilon_greedy(q_values, step):
    epsilon = max(eps_min,
                  eps_max - ((eps_max - eps_min) * (step / eps_decay_steps)))
if np.random.random() < epsilon:
return np.random.randint(n_outputs)
return np.argmax(q_values)
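# Schedule check: epsilon starts at eps_max (1.0), falls to 0.55 at step 3,000,000,
# and stays at eps_min (0.1) from step 6,000,000 onwards.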
###Output
_____no_output_____
###Markdown
Now we will train the model to play some Pong. The model will input an action once every 3 frames. The preprocessing functions defined above will use the 3 frames to compute the state the model will use to choose its next action.
###Code
n_steps = 10000000
training_start = 100000
training_interval = 4
save_steps = 1000
copy_steps = 10000
discount_rate = 0.95
skip_start = 20
batch_size = 50
iteration = 0
done = True # To reset the environment at the start.
loss_val = np.infty
game_length = 0
total_max_q = 0.0
mean_max_q = 0.0
checkpoint_path = "./pong_dqn.ckpt"
# Utility function to get the environment state for the model.
def perform_action(action):
preprocess_observations = []
total_reward = 0.0
for i in range(3):
obs, reward, done, info = env.step(action)
total_reward += reward
if done:
for _ in range(i, 3):
preprocess_observations.append(preprocess_observation(obs))
break
else:
preprocess_observations.append(preprocess_observation(obs))
return combine_observations(preprocess_observations).reshape(80, 80, 1), \
total_reward, done
# Main training loop
with tf.Session() as sess:
if os.path.isfile(checkpoint_path + '.index'):
saver.restore(sess, checkpoint_path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print('\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}'
'\tMean Max-Q {:5f} '.format(
iteration, step, n_steps, 100 * step / n_steps, loss_val,
mean_max_q),
end='')
if done:
obs = env.reset()
for _ in range(skip_start):
obs, reward, done, info = env.step(0)
state, reward, done = perform_action(0)
# Evaluate the next action for the agent.
q_values = online_q_values.eval(
feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# The online DQN plays the game.
next_state, reward, done = perform_action(action)
# Save the result in the ReplayMemory.
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
# Compute statistics which help us monitor how training is going.
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
# Only train after the warmup rounds and only every few rounds.
if iteration < training_start or iteration % training_interval != 0:
continue
        # Sample memories from the replay memory.
X_state_val, X_action_val, rewards, X_next_state_val, continues = \
sample_memories(batch_size)
next_q_values = target_q_values.eval(
feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
# Train the online DQN.
_, loss_val = sess.run([training_op, loss], feed_dict={
X_state: X_state_val,
X_action: X_action_val,
y: y_val,
})
# Regularly copy the online DQN to the target DQN.
if step % copy_steps == 0:
copy_online_to_target.run()
# Regularly save the model.
if step and step % save_steps == 0:
saver.save(sess, checkpoint_path)
preprocess_observations = []
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
def dqn_policy(obs, i):
if len(preprocess_observations) < 3:
preprocess_observations.append(preprocess_observation(obs))
if len(preprocess_observations) == 3:
state = combine_observations(preprocess_observations)
q_values = online_q_values.eval(
feed_dict={X_state: [state.reshape(80, 80, 1)]})
dqn_policy.cur_action = np.argmax(q_values)
return dqn_policy.cur_action
preprocess_observations[i % 3] = preprocess_observation(obs)
if i % 3 == 2:
state = combine_observations(preprocess_observations)
q_values = online_q_values.eval(
feed_dict={X_state: [state.reshape(80, 80, 1)]})
dqn_policy.cur_action = np.argmax(q_values)
return dqn_policy.cur_action
dqn_policy.cur_action = 0
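# dqn_policy keeps a rolling window of the 3 most recent preprocessed frames and
# queries the online DQN for a fresh action only once every 3 frames, reusing the
# previous action in between.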
html = run_episode(dqn_policy, n_max_steps=10000)
html
###Output
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/saver.py:1276: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.
Instructions for updating:
Use standard file APIs to check for files with this prefix.
INFO:tensorflow:Restoring parameters from /content/gdrive/My Drive/models/pong_dqn.ckpt
###Markdown
**Chapter 16 – Reinforcement Learning** This notebook contains all the sample code and solutions to the exercises in chapter 16. Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
###Code
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import numpy.random as rnd
import os
import sys
# to make this notebook's output stable across runs
rnd.seed(42)
# To plot pretty figures and animations
%matplotlib nbagg
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Next we will load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
[2017-02-17 10:57:41,836] Making new env: MsPacman-v0
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [width, height, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,4))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little helper function to plot an environment:
###Code
def plot_environment(env, figsize=(5,4)):
plt.close() # or else nbagg sometimes plots in the previous cell
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represent the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower left for 40 steps:
###Code
env.reset()
for step in range(110):
env.step(3) #left
for step in range(40):
env.step(8) #lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), by moving in random directions for 10 steps at a time, recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now show the animation (it's a bit jittery within Jupyter):
###Code
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
plt.close() # or else nbagg sometimes plots in the previous cell
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch), frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To code our first learning agent, we will be using a simpler environment: the Cart-Pole. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment... unfortunately we need to fix an annoying rendering issue first. Fixing the rendering issue Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify the `rgb_array` mode. In general you can safely ignore that window. However, if Jupyter is running on a headless server (i.e. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb. You can start Jupyter using the `xvfb-run` command:

    $ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook

If Jupyter is running on a headless server but you don't want to worry about Xvfb, then you can just use the following rendering function for the Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
from pyglet.gl import gl_info
openai_cart_pole_rendering = True # no problem, let's use OpenAI gym's rendering function
except Exception:
openai_cart_pole_rendering = False # probably no X server available, let's use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
# use OpenAI gym's rendering function
return env.render(mode="rgb_array")
else:
# rendering for the cart pole environment (in case OpenAI gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
cart_col = 0x000000 # Blue Green Red
pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
plt.show()
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to the right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like it's doing what we're telling it to do. Now how can we make the pole remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! Neural Network Policies Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`.
###Code
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more than this
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.contrib.layers.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu,
weights_initializer=initializer)
outputs = fully_connected(hidden, n_outputs, activation_fn=tf.nn.sigmoid,
weights_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
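# tf.multinomial samples an action index from the log-probabilities, so the agent
# explores stochastically instead of always taking the most probable action.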
init = tf.global_variables_initializer()
###Output
_____no_output_____
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performed:
###Code
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`):
###Code
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
tf.reset_default_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)
logits = fully_connected(hidden, n_outputs, activation_fn=None)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames)
plt.show()
###Output
[2017-02-17 10:58:55,704] Making new env: CartPole-v0
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_. The _Policy Gradients_ algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
###Code
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
tf.reset_default_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)
logits = fully_connected(hidden, n_outputs, activation_fn=None)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
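# Worked example (by hand): discount_rewards([10, 0, -50], 0.8) gives
#   step 2: -50;  step 1: 0 + 0.8*(-50) = -40;  step 0: 10 + 0.8*(-40) = -22
# i.e. array([-22., -40., -50.]). discount_and_normalize_rewards then
# standardizes these values across all games (zero mean, unit standard deviation).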
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames)
plt.show()
###Output
[2017-02-17 11:06:16,047] Making new env: CartPole-v0
###Markdown
Markov Chains
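Each row of `transition_probabilities` below lists the probabilities of moving from one state to each of the four states s0 to s3 (for example, from s1 the chain moves to s2 with probability 0.9). Note that s3 is an absorbing state: once reached, the chain stays there forever, which is why most of the sequences below end with it.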
###Code
transition_probabilities = [
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to ...
[0.0, 1.0, 0.0, 0.0], # from s2 to ...
[0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = rnd.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process
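Here transitions and rewards also depend on the chosen action: `transition_probabilities[s][a][s']` is the probability of reaching state s' from state s when playing action a, `rewards[s][a][s']` is the corresponding reward, and `None` marks actions that are not available in a given state.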
###Code
transition_probabilities = [
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return rnd.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = rnd.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning will learn the optimal policy by watching the random policy play.
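The update rule used below is the standard Q-Learning update: Q(s, a) ← (1 − α)·Q(s, a) + α·(r + γ·max over a' of Q(s', a')), where α (`alpha`) is the learning rate and γ (`gamma`) is the discount rate.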
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
q_values[state][actions]=0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to play MsPacman using Deep Q-Learning
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
mspacman_color = np.array([210, 164, 74]).mean()
def preprocess_observation(obs):
img = obs[1:176:2, ::2] # crop and downsize
img = img.mean(axis=2) # to greyscale
img[img==mspacman_color] = 0 # Improve contrast
    img = (img - 128) / 128 # normalize from -1. to 1. (an extra "- 1" here would shift the range to [-2, 0])
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Build DQN
###Code
tf.reset_default_graph()
from tensorflow.contrib.layers import convolution2d, fully_connected
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"]*3
conv_activation = [tf.nn.relu]*3
n_hidden_inputs = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n
initializer = tf.contrib.layers.variance_scaling_initializer()
learning_rate = 0.01
def q_network(X_state, scope):
prev_layer = X_state
conv_layers = []
with tf.variable_scope(scope) as scope:
for n_maps, kernel_size, stride, padding, activation in zip(conv_n_maps, conv_kernel_sizes, conv_strides, conv_paddings, conv_activation):
prev_layer = convolution2d(prev_layer, num_outputs=n_maps, kernel_size=kernel_size, stride=stride, padding=padding, activation_fn=activation, weights_initializer=initializer)
conv_layers.append(prev_layer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_inputs])
hidden = fully_connected(last_conv_layer_flat, n_hidden, activation_fn=hidden_activation, weights_initializer=initializer)
outputs = fully_connected(hidden, n_outputs, activation_fn=None)
trainable_vars = {var.name[len(scope.name):]: var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name)}
return outputs, trainable_vars
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width, input_channels])
actor_q_values, actor_vars = q_network(X_state, scope="q_networks/actor") # acts
critic_q_values, critic_vars = q_network(X_state, scope="q_networks/critic") # learns
copy_ops = [actor_var.assign(critic_vars[var_name])
for var_name, actor_var in actor_vars.items()]
copy_critic_to_actor = tf.group(*copy_ops)
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(critic_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keep_dims=True)
cost = tf.reduce_mean(tf.square(y - q_value))
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cost, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
actor_vars
from collections import deque
replay_memory_size = 10000
replay_memory = deque([], maxlen=replay_memory_size)
def sample_memories(batch_size):
indices = rnd.permutation(len(replay_memory))[:batch_size]
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for idx in indices:
memory = replay_memory[idx]
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
eps_min = 0.05
eps_max = 1.0
eps_decay_steps = 50000
import sys
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if rnd.rand() < epsilon:
return rnd.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
n_steps = 100000 # total number of training steps
training_start = 1000 # start training after 1,000 game iterations
training_interval = 3 # run a training step every 3 game iterations
save_steps = 50 # save the model every 50 training steps
copy_steps = 25 # copy the critic to the actor every 25 training steps
discount_rate = 0.95
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "./my_dqn.ckpt"
done = True # env needs to be reset
with tf.Session() as sess:
if os.path.isfile(checkpoint_path):
saver.restore(sess, checkpoint_path)
else:
init.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print("\rIteration {}\tTraining step {}/{} ({:.1f}%)".format(iteration, step, n_steps, step * 100 / n_steps), end="")
if done: # game over, start again
obs = env.reset()
for skip in range(skip_start): # skip boring game iterations at the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
# Actor evaluates what to do
q_values = actor_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# Actor plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
if iteration < training_start or iteration % training_interval != 0:
continue
# Critic learns
X_state_val, X_action_val, rewards, X_next_state_val, continues = sample_memories(batch_size)
next_q_values = actor_q_values.eval(feed_dict={X_state: X_next_state_val})
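        # Bellman target: r + gamma * max_a' Q(s', a'); continues (0. or 1.) zeroes the bootstrap term when the episode ended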
y_val = rewards + continues * discount_rate * np.max(next_q_values, axis=1, keepdims=True)
training_op.run(feed_dict={X_state: X_state_val, X_action: X_action_val, y: y_val})
# Regularly copy critic to actor
if step % copy_steps == 0:
copy_critic_to_actor.run()
# And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
Iteration 328653 Training step 100000/100000 (100.0%)
###Markdown
**Chapter 16 – Reinforcement Learning** This notebook contains all the sample code and solutions to the exercises in chapter 16. Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
###Code
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import numpy.random as rnd  # used throughout (rnd.choice, rnd.permutation, rnd.rand, ...)
import os
import sys
import tensorflow as tf  # needed by reset_graph() below
# to make this notebook's output stable across runs
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# To plot pretty figures and animations
%matplotlib nbagg
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
Note: there may be minor differences between the output of this notebook and the examples shown in the book. You can safely ignore these differences. They are mainly due to the fact that most of the environments provided by OpenAI gym have some randomness. Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Next we will load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
[2017-06-08 07:37:31,348] Making new env: MsPacman-v0
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [height, width, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,4))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little helper function to plot an environment:
###Code
def plot_environment(env, figsize=(5,4)):
plt.close() # or else nbagg sometimes plots in the previous cell
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represent the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower left for 40 steps:
###Code
env.reset()
for step in range(110):
env.step(3) #left
for step in range(40):
env.step(8) #lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), by moving in random directions for 10 steps at a time, recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now show the animation (it's a bit jittery within Jupyter):
###Code
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
plt.close() # or else nbagg sometimes plots in the previous cell
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch), frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To code our first learning agent, we will be using a simpler environment: the Cart-Pole. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment... unfortunately we need to fix an annoying rendering issue first. Fixing the rendering issue Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify the `rgb_array` mode. In general you can safely ignore that window. However, if Jupyter is running on a headless server (i.e. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb. You can start Jupyter using the `xvfb-run` command: `$ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook`. If Jupyter is running on a headless server but you don't want to worry about Xvfb, then you can just use the following rendering function for the Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
from pyglet.gl import gl_info
openai_cart_pole_rendering = True # no problem, let's use OpenAI gym's rendering function
except Exception:
openai_cart_pole_rendering = False # probably no X server available, let's use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
# use OpenAI gym's rendering function
return env.render(mode="rgb_array")
else:
# rendering for the cart pole environment (in case OpenAI gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
cart_col = 0x000000 # Blue Green Red
pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
plt.show()
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
img.shape
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to the right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like it's doing what we're telling it to do. Now how can we make the pole remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard-code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! Neural Network Policies Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`. Note: instead of using the `fully_connected()` function from the `tensorflow.contrib.layers` module (as in the book), we now use the `dense()` function from the `tf.layers` module, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same. The main differences relevant to this chapter are:
* the `_fn` suffix was removed in all the parameters that had it (for example the `activation_fn` parameter was renamed to `activation`),
* the `weights` parameter was renamed to `kernel`,
* the default activation is `None` instead of `tf.nn.relu`.
###Code
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more than this
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.contrib.layers.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
###Output
_____no_output_____
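###Markdown
As an aside, the `tf.multinomial()` sampling step above is equivalent to the following plain NumPy draw (an illustrative sketch, not part of the graph):
###Code
# Illustrative sketch only: sample action 0 (left) with probability p_left,
# otherwise action 1 (right), just like tf.multinomial() does in the graph.
p_left = 0.7 # hypothetical probability output by the policy network
np.random.choice([0, 1], p=[p_left, 1 - p_left])
###Output
_____no_output_____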
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state, then you might need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability of ordering it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performed:
###Code
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`):
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames)
plt.show()
###Output
[2017-06-08 07:39:15,860] Making new env: CartPole-v0
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_. The _Policy Gradients_ algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames)
plt.show()
###Output
[2017-06-08 08:01:35,953] Making new env: CartPole-v0
###Markdown
Markov Chains
###Code
transition_probabilities = [
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to ...
[0.0, 1.0, 0.0, 0.0], # from s2 to ...
[0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = rnd.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process
###Code
transition_probabilities = [
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return rnd.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = rnd.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning will learn the optimal policy by watching the random policy play.
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
q_values[state][actions]=0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to play MsPacman using Deep Q-Learning
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
mspacman_color = np.array([210, 164, 74]).mean()
def preprocess_observation(obs):
img = obs[1:176:2, ::2] # crop and downsize
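    # rows 1, 3, ..., 175 of the 210x160 frame -> 88 rows; every 2nd of 160 columns -> 80 columns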
img = img.mean(axis=2) # to greyscale
img[img==mspacman_color] = 0 # Improve contrast
    img = (img - 128) / 128 # normalize from -1. to 1. (an extra "- 1" here would shift the range to [-2, 0])
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Build DQN Note: instead of using `tf.contrib.layers.convolution2d()` or `tf.contrib.layers.conv2d()` (as in the book), we now use `tf.layers.conv2d()`, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same, except that the parameter names have changed slightly:
* the `num_outputs` parameter was renamed to `filters`,
* the `stride` parameter was renamed to `strides`,
* the `_fn` suffix was removed from parameter names that had it (e.g., `activation_fn` was renamed to `activation`),
* the `weights_initializer` parameter was renamed to `kernel_initializer`,
* the weights variable was renamed to `"kernel"` (instead of `"weights"`), and the biases variable was renamed from `"biases"` to `"bias"`,
* and the default `activation` is now `None` instead of `tf.nn.relu`.
###Code
reset_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"]*3
conv_activation = [tf.nn.relu]*3
n_hidden_inputs = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
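# (88x80 input with "SAME" padding and strides 4, 2, 1: 88 -> 22 -> 11 -> 11 rows, 80 -> 20 -> 10 -> 10 columns)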
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n
initializer = tf.contrib.layers.variance_scaling_initializer()
learning_rate = 0.01
def q_network(X_state, scope):
prev_layer = X_state
conv_layers = []
with tf.variable_scope(scope) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(conv_n_maps, conv_kernel_sizes, conv_strides, conv_paddings, conv_activation):
prev_layer = tf.layers.conv2d(prev_layer, filters=n_maps, kernel_size=kernel_size, strides=strides, padding=padding, activation=activation, kernel_initializer=initializer)
conv_layers.append(prev_layer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_inputs])
hidden = tf.layers.dense(last_conv_layer_flat, n_hidden, activation=hidden_activation, kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs)
trainable_vars = {var.name[len(scope.name):]: var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name)}
return outputs, trainable_vars
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width, input_channels])
actor_q_values, actor_vars = q_network(X_state, scope="q_networks/actor") # acts
critic_q_values, critic_vars = q_network(X_state, scope="q_networks/critic") # learns
copy_ops = [actor_var.assign(critic_vars[var_name])
for var_name, actor_var in actor_vars.items()]
copy_critic_to_actor = tf.group(*copy_ops)
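# The critic is the network that gets trained; its weights are periodically copied to the actor,
# a lagged clone that both selects actions and supplies the Q-value targets in the training loop
# below. Learning against this slowly-moving copy stabilizes training.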
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(critic_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keep_dims=True)
cost = tf.reduce_mean(tf.square(y - q_value))
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cost, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
actor_vars
from collections import deque
replay_memory_size = 10000
replay_memory = deque([], maxlen=replay_memory_size)
def sample_memories(batch_size):
indices = rnd.permutation(len(replay_memory))[:batch_size]
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for idx in indices:
memory = replay_memory[idx]
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
eps_min = 0.05
eps_max = 1.0
eps_decay_steps = 50000
import sys
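# epsilon decays linearly from eps_max to eps_min over eps_decay_steps training steps, then stays at eps_min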
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if rnd.rand() < epsilon:
return rnd.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
n_steps = 100000 # total number of training steps
training_start = 1000 # start training after 1,000 game iterations
training_interval = 3 # run a training step every 3 game iterations
save_steps = 50 # save the model every 50 training steps
copy_steps = 25 # copy the critic to the actor every 25 training steps
discount_rate = 0.95
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "./my_dqn.ckpt"
done = True # env needs to be reset
with tf.Session() as sess:
if os.path.isfile(checkpoint_path):
saver.restore(sess, checkpoint_path)
else:
init.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print("\rIteration {}\tTraining step {}/{} ({:.1f}%)".format(iteration, step, n_steps, step * 100 / n_steps), end="")
if done: # game over, start again
obs = env.reset()
for skip in range(skip_start): # skip boring game iterations at the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
# Actor evaluates what to do
q_values = actor_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# Actor plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
if iteration < training_start or iteration % training_interval != 0:
continue
# Critic learns
X_state_val, X_action_val, rewards, X_next_state_val, continues = sample_memories(batch_size)
next_q_values = actor_q_values.eval(feed_dict={X_state: X_next_state_val})
y_val = rewards + continues * discount_rate * np.max(next_q_values, axis=1, keepdims=True)
training_op.run(feed_dict={X_state: X_state_val, X_action: X_action_val, y: y_val})
# Regularly copy critic to actor
if step % copy_steps == 0:
copy_critic_to_actor.run()
# And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
Iteration 300999 Training step 99999/100000 (100.0%)
###Markdown
**Chapter 16 – Reinforcement Learning** This notebook contains all the sample code and solutions to the exercices in chapter 16. Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
###Code
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import numpy.random as rnd
import os
import sys
# to make this notebook's output stable across runs
rnd.seed(42)
# To plot pretty figures and animations
%matplotlib nbagg
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Next we will load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
[2017-04-29 19:57:50,476] Making new env: MsPacman-v0
###Markdown
Let's initialize the environment by calling is `reset()` method. This returns an observation:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [width, height, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,4))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little helper function to plot an environment:
###Code
def plot_environment(env, figsize=(5,4)):
plt.close() # or else nbagg sometimes plots in the previous cell
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represents the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower left for 40 steps:
###Code
env.reset()
for step in range(110):
env.step(3) #left
for step in range(40):
env.step(8) #lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), by moving in random directions for 10 steps at a time, recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now show the animation (it's a bit jittery within Jupyter):
###Code
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
plt.close() # or else nbagg sometimes plots in the previous cell
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch), frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To code our first learning agent, we will be using a simpler environment: the Cart-Pole. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment... unfortunately we need to fix an annoying rendering issue first. Fixing the rendering issue Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify the `rgb_array` mode. In general you can safely ignore that window. However, if Jupyter is running on a headless server (i.e. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb. You can start Jupyter using the `xvfb-run` command: $ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook. If Jupyter is running on a headless server but you don't want to worry about Xvfb, then you can just use the following rendering function for the Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
from pyglet.gl import gl_info
openai_cart_pole_rendering = True # no problem, let's use OpenAI gym's rendering function
except Exception:
openai_cart_pole_rendering = False # probably no X server available, let's use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
# use OpenAI gym's rendering function
return env.render(mode="rgb_array")
else:
# rendering for the cart pole environment (in case OpenAI gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
cart_col = 0x000000 # Blue Green Red
pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
plt.show()
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to the right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like it's doing what we're telling it to do. Now how can we make the pole remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! Neural Network Policies Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`. Note: instead of using the `fully_connected()` function from the `tensorflow.contrib.layers` module (as in the book), we now use the `dense()` function from the `tf.layers` module, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same. The main differences relevant to this chapter are:* the `_fn` suffix was removed in all the parameters that had it (for example the `activation_fn` parameter was renamed to `activation`).* the `weights` parameter was renamed to `kernel`,* the default activation is `None` instead of `tf.nn.relu`
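For concreteness, here is the same layer call written both ways (a comparison sketch only; the contrib line is the book's original style and is not executed here):
###Code
# book version (tensorflow.contrib.layers):
#   hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu,
#                            weights_initializer=initializer)
# this notebook (tf.layers):
#   hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
#                            kernel_initializer=initializer)
###Output
_____no_output_____
###Markdown
With that in mind, let's build the policy network: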
###Code
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more than this
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.contrib.layers.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
###Output
_____no_output_____
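###Markdown
Before reading on, here is the sampling idea from the cell above in plain NumPy (an illustrative sketch outside the TensorFlow graph; the 0.7 is made up):
###Code
# if the policy network said p(left) = 0.7, we sample instead of always picking "left":
p_left = 0.7
np.random.choice([0, 1], p=[p_left, 1 - p_left]) # 0 = left, 1 = right
###Output
_____no_output_____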
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performed:
###Code
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`):
###Code
import tensorflow as tf
tf.reset_default_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames)
plt.show()
###Output
[2017-04-29 19:58:43,660] Making new env: CartPole-v0
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_. The _Policy Gradients_ algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
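For a concrete instance (the same numbers the `discount_rewards()` function is tested on below): with a discount rate of 0.8, an action followed by rewards +10, 0 and −50 over the next three steps gets a total score of 10 + 0.8 × 0 + 0.8² × (−50) = −22.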
###Code
import tensorflow as tf
tf.reset_default_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
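# the target label: treat the sampled action as if it were the correct one
# (y is the target probability of going left: 1. if we sampled "left", 0. if "right");
# the resulting gradients are rescaled by each action's score before being applied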
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
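# these gradients are not applied directly: at each step they are fetched, later multiplied
# by the (discounted, normalized) action scores, averaged over all steps and games, and fed
# back in through the placeholders below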
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
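# expected: array([-22., -40., -50.]), matching the worked example above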
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
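# expected: every score from the first (bad) game is negative and every score from the
# second (good) game is positive, roughly [array([-0.28, -0.87, -1.19]), array([1.27, 1.07])]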
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames)
plt.show()
###Output
[2017-04-29 20:01:49,708] Making new env: CartPole-v0
###Markdown
Markov Chains
###Code
transition_probabilities = [
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to ...
[0.0, 1.0, 0.0, 0.0], # from s2 to ...
[0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = rnd.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process
###Code
transition_probabilities = [
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return rnd.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = rnd.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning will learn the optimal policy by watching the random policy play.
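Each observed transition (s, a, r, s') nudges the corresponding Q-Value with the temporal-difference update implemented in the loop below, using learning rate α = 0.01 (`alpha`) and discount rate γ = 0.99 (`gamma`): Q(s, a) ← (1 − α)·Q(s, a) + α·(r + γ·max_a' Q(s', a')).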
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
q_values[state][actions]=0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to play MsPacman using Deep Q-Learning
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
mspacman_color = np.array([210, 164, 74]).mean()
def preprocess_observation(obs):
img = obs[1:176:2, ::2] # crop and downsize
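    # shapes: (210, 160, 3) -> (88, 80, 3); rows 1..175 in steps of 2, every second column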
img = img.mean(axis=2) # to greyscale
img[img==mspacman_color] = 0 # Improve contrast
    img = (img - 128) / 128 # normalize from -1. to 1. (the original extra "- 1" shifted the range to [-2, 0))
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Build DQN Note: instead of using `tf.contrib.layers.convolution2d()` or `tf.contrib.layers.conv2d()` (as in the book), we now use the `tf.layers.conv2d()`, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same, except that the parameter names have changed slightly:* the `num_outputs` parameter was renamed to `filters`,* the `stride` parameter was renamed to `strides`,* the `_fn` suffix was removed from parameter names that had it (e.g., `activation_fn` was renamed to `activation`),* the `weights_initializer` parameter was renamed to `kernel_initializer`,* the weights variable was renamed to `"kernel"` (instead of `"weights"`), and the biases variable was renamed from `"biases"` to `"bias"`,* and the default `activation` is now `None` instead of `tf.nn.relu`.
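A quick sanity check on the flattened size used below (all numbers from the code itself, assuming "SAME" padding on the 88×80 input): stride 4 gives 22×20, stride 2 gives 11×10, stride 1 keeps 11×10, and the last layer has 64 maps, hence n_hidden_inputs = 64 × 11 × 10 = 7040.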
###Code
tf.reset_default_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"]*3
conv_activation = [tf.nn.relu]*3
n_hidden_inputs = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n
initializer = tf.contrib.layers.variance_scaling_initializer()
learning_rate = 0.01
def q_network(X_state, scope):
prev_layer = X_state
conv_layers = []
with tf.variable_scope(scope) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(conv_n_maps, conv_kernel_sizes, conv_strides, conv_paddings, conv_activation):
prev_layer = tf.layers.conv2d(prev_layer, filters=n_maps, kernel_size=kernel_size, strides=strides, padding=padding, activation=activation, kernel_initializer=initializer)
conv_layers.append(prev_layer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_inputs])
hidden = tf.layers.dense(last_conv_layer_flat, n_hidden, activation=hidden_activation, kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs)
trainable_vars = {var.name[len(scope.name):]: var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name)}
return outputs, trainable_vars
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width, input_channels])
actor_q_values, actor_vars = q_network(X_state, scope="q_networks/actor") # acts
critic_q_values, critic_vars = q_network(X_state, scope="q_networks/critic") # learns
copy_ops = [actor_var.assign(critic_vars[var_name])
for var_name, actor_var in actor_vars.items()]
copy_critic_to_actor = tf.group(*copy_ops)
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(critic_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keep_dims=True)
cost = tf.reduce_mean(tf.square(y - q_value))
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cost, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
actor_vars
from collections import deque
replay_memory_size = 10000
replay_memory = deque([], maxlen=replay_memory_size)
def sample_memories(batch_size):
indices = rnd.permutation(len(replay_memory))[:batch_size]
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for idx in indices:
memory = replay_memory[idx]
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
eps_min = 0.05
eps_max = 1.0
eps_decay_steps = 50000
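# epsilon decays linearly from 1.0 down to 0.05 over the first 50,000 training steps, then stays at 0.05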
import sys
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if rnd.rand() < epsilon:
return rnd.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
n_steps = 100000 # total number of training steps
training_start = 1000 # start training after 1,000 game iterations
training_interval = 3 # run a training step every 3 game iterations
save_steps = 50 # save the model every 50 training steps
copy_steps = 25 # copy the critic to the actor every 25 training steps
discount_rate = 0.95
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "./my_dqn.ckpt"
done = True # env needs to be reset
with tf.Session() as sess:
if os.path.isfile(checkpoint_path):
saver.restore(sess, checkpoint_path)
else:
init.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print("\rIteration {}\tTraining step {}/{} ({:.1f}%)".format(iteration, step, n_steps, step * 100 / n_steps), end="")
if done: # game over, start again
obs = env.reset()
for skip in range(skip_start): # skip boring game iterations at the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
# Actor evaluates what to do
q_values = actor_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# Actor plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
if iteration < training_start or iteration % training_interval != 0:
continue
# Critic learns
X_state_val, X_action_val, rewards, X_next_state_val, continues = sample_memories(batch_size)
next_q_values = actor_q_values.eval(feed_dict={X_state: X_next_state_val})
y_val = rewards + continues * discount_rate * np.max(next_q_values, axis=1, keepdims=True)
training_op.run(feed_dict={X_state: X_state_val, X_action: X_action_val, y: y_val})
# Regularly copy critic to actor
if step % copy_steps == 0:
copy_critic_to_actor.run()
# And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
Iteration 328653 Training step 100000/100000 (100.0%)
###Markdown
**Chapter 16 – Reinforcement Learning** This notebook contains all the sample code and solutions to the exercises in chapter 16. Install Libs
###Code
#!pip install --upgrade pip
#!pip install --upgrade gym
#!pip install atari-py
###Output
_____no_output_____
###Markdown
Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
###Code
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
import sys
# to make this notebook's output stable across runs
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# To plot pretty figures and animations
%matplotlib nbagg
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
Note: there may be minor differences between the output of this notebook and the examples shown in the book. You can safely ignore these differences. They are mainly due to the fact that most of the environments provided by OpenAI gym have some randomness. Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`: Next we will load the MsPacman environment, version 0.
###Code
import gym
env = gym.make('MsPacman-v0')
###Output
_____no_output_____
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [height, width, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,4))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little helper function to plot an environment:
###Code
def plot_environment(env, figsize=(5,4)):
plt.close() # or else nbagg sometimes plots in the previous cell
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represent the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower left for 40 steps:
###Code
env.reset()
for step in range(110):
env.step(3) #left
for step in range(40):
env.step(8) #lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), by moving in random directions for 10 steps at a time, recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now show the animation (it's a bit jittery within Jupyter):
###Code
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
plt.close() # or else nbagg sometimes plots in the previous cell
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch), frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To code our first learning agent, we will be using a simpler environment: the Cart-Pole. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
# Create the CartPole-v0 environment
env = gym.make("CartPole-v0")
# Initialize the environment; the returned obs is the environment's current observation
obs = env.reset()
# For CartPole-v0 the observation is (cart horizontal position (0.0 = center), velocity,
# pole angle (0.0 = vertical), pole angular velocity)
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment... unfortunately we need to fix an annoying rendering issue first. Fixing the rendering issue If the animation does not display correctly in your Jupyter Notebook, see the notes below. Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify the `rgb_array` mode. In general you can safely ignore that window. However, if Jupyter is running on a headless server (i.e. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb. You can start Jupyter using the `xvfb-run` command: $ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook. If Jupyter is running on a headless server but you don't want to worry about Xvfb, then you can just use the following rendering function for the Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
    # first, try to use OpenAI gym's rendering function
    from pyglet.gl import gl_info
    openai_cart_pole_rendering = True
except Exception:
    # probably no X server available, let's use our own rendering function
    openai_cart_pole_rendering = False
print("openai_cart_pole_rendering: ", openai_cart_pole_rendering)
# rendering function
def render_cart_pole(env, obs):
    if openai_cart_pole_rendering:
        # use OpenAI gym's built-in rendering function
        return env.render(mode="rgb_array")
    else:
        # custom rendering (used when OpenAI gym's rendering function is unavailable)
        # configuration
        img_w = 600 # display area width (pixels)
        img_h = 400 # display area height (pixels)
        cart_w = img_w // 12 # cart width (pixels)
        cart_h = img_h // 15 # cart height (pixels)
        pole_len = img_h // 3.5 # pole length (pixels)
        pole_w = img_w // 80 + 1 # pole width (pixels)
        x_width = 2 # environment units spanned by the image width
        max_ang = 0.2 # maximum pole tilt angle
        bg_col = (255, 255, 255) # background color
        cart_col = 0x000000 # cart color (Blue Green Red)
        pole_col = 0x669acc # pole color (Blue Green Red)
        # environment observation
        pos, vel, ang, ang_vel = obs # (cart position (0.0 = center), velocity, pole angle (0.0 = vertical), pole angular velocity)
        # component coordinates
        cart_x = pos * img_w // x_width + img_w // x_width # cart x coordinate
        cart_y = img_h * 95 // 100 # cart y coordinate
        top_pole_x = cart_x + pole_len * np.sin(ang) # x coordinate of the pole's tip
        top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang) # y coordinate of the pole's tip
        # draw: background, ground, cart, pole
        img = Image.new('RGB', (img_w, img_h), bg_col)
        draw = ImageDraw.Draw(img)
        draw.line((0, cart_y, img_w, cart_y), fill=0)
        draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
        draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
        return np.array(img)
# plotting function
def plot_cart_pole(env, obs):
    plt.close() # or else nbagg sometimes plots in the previous cell
    img = render_cart_pole(env, obs)
    plt.imshow(img)
    plt.axis("off")
    plt.show()
# plot the freshly reset environment
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
# Check which actions this environment supports
# Discrete(2) means two possible actions: 0 = accelerate left; 1 = accelerate right
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
# Simulate a run (accelerating left at every step)
obs = env.reset()
while True:
    # env.step(0): accelerate left; env.step(1): accelerate right
    # Return values:
    # obs: new observation: cart position (0.0 = center), velocity (>0 means rightward),
    #      pole angle (0.0 = vertical, >0 means tilted right), pole angular velocity (>0 means rightward)
    # reward: each step earns a reward of 1.0; the goal is to keep going as long as possible
    # done: True means the episode is over
    # info: debugging information
    obs, reward, done, info = env.step(0)
    print(reward, done, obs, info)
    if done:
        break
# Plot the state at the end of the run above
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
img.shape
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to the right instead:
###Code
# Simulate a run (accelerating right at every step)
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
print(reward, done, obs, info)
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like it's doing what we're telling it to do. Now how can we make the pole remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:
###Code
# A hard-coded strategy, no learned policy
# frames that will end up in the recorded video
frames = []
# parameters
n_max_steps = 1000
n_change_steps = 10
# run for at most 1000 steps
obs = env.reset()
for step in range(n_max_steps):
    # render the current frame and append it to the animation
    img = render_cart_pole(env, obs)
    frames.append(img)
    # current observation: cart position (0.0 = center), velocity (>0 means rightward),
    # pole angle (0.0 = vertical, >0 means tilted right), pole angular velocity (>0 means rightward)
    position, velocity, angle, angular_velocity = obs
    # pick the next action: accelerate left if the pole tilts left, right if it tilts right
    if angle < 0:
        action = 0
    else:
        action = 1
    # apply the action and check whether it ended the game
    obs, reward, done, info = env.step(action)
    if done:
        break
# how many steps did we last?
print(len(frames))
# replay the run
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! Neural Network Policies **Driving the cart with a neural network** **Method:** the network takes the environment observation as input and outputs an estimated probability that each action is the correct one. **Output:** CartPole has only two actions (accelerate left, accelerate right), so a single output neuron suffices: its value p is the probability that accelerating left is correct, and 1 - p the probability for accelerating right. **Input:** the CartPole environment is simple enough that only the current observation matters (cart position (0.0 = center), velocity (>0 means rightward), pole angle (0.0 = vertical, >0 means tilted right), pole angular velocity (>0 means rightward)), so 4 input neurons are used. **Note that some environments contain hidden state, in which case past actions and observations must be considered. Two examples:** 1. The environment reveals only the cart's position, not its velocity: the velocity must be estimated from the observations at the last two time points. 2. The observations are noisy: several past observations are needed to estimate the most likely current state. **Action selection:** actions are chosen at random according to the output probabilities; sampling by probability both respects the model's output and gives the network a chance to explore new behaviors. Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`. Note: instead of using the `fully_connected()` function from the `tensorflow.contrib.layers` module (as in the book), we now use the `dense()` function from the `tf.layers` module, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same. The main differences relevant to this chapter are:* the `_fn` suffix was removed in all the parameters that had it (for example the `activation_fn` parameter was renamed to `activation`).* the `weights` parameter was renamed to `kernel`,* the default activation is `None` instead of `tf.nn.relu`
###Code
import tensorflow as tf
# 1. Network architecture: 4 input neurons for the 4 observation variables, 1 output neuron
#    for the probability that accelerating left is correct; only 4 hidden neurons because the
#    task is simple and does not call for a complex network
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more than this
n_outputs = 1 # only outputs the probability of accelerating left
# 2. Build the network
# Weight initializer: helps mitigate vanishing and exploding gradients
# https://www.w3cschool.cn/tensorflow_python/tensorflow_python-e9tb2ocq.html
# Default parameters:
# distribution = "normal" (samples drawn from a truncated normal centered on 0, with stddev = sqrt(scale / n))
# mode = "fan_in" (the n used to compute stddev is the number of input units of the weight tensor)
# scale = 1.0
# Output layer: sigmoid activation maps the hidden layer's output to a single probability in [0, 1]
#   (this models a binary classification; a multi-class problem would need a softmax activation)
# Hidden layer: elu activation, which helps mitigate vanishing gradients
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid, kernel_initializer=initializer)
# 3. Randomly select an action according to the output probabilities
# tf.multinomial(logits, num_samples, seed=None, name=None)
# draws samples from a multinomial distribution: https://www.w3cschool.cn/tensorflow_python/tensorflow_python-l5d12fln.html
# logits: 2-D tensor of shape [batch_size, num_classes]; each slice [i, :] holds the unnormalized log-probabilities of all classes
# num_samples: 0-D tensor, the number of independent samples to draw per row slice
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs]) # (left_prob, right_prob = 1 - left_prob)
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
# 4. Initialize global variables
init = tf.global_variables_initializer()
###Output
WARNING:tensorflow:From <ipython-input-37-c33ac8782c89>:19: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.
Instructions for updating:
Use keras.layers.dense instead.
WARNING:tensorflow:From /usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
WARNING:tensorflow:From <ipython-input-37-c33ac8782c89>:28: multinomial (from tensorflow.python.ops.random_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.random.categorical instead.
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
###Code
# The model is not trained yet (the weights are random); first get the loop running so the
# model can drive the cart, then train the model afterwards
n_max_steps = 1000
frames = []
with tf.Session() as sess:
    init.run()
    obs = env.reset() # initial environment observation
    for step in range(n_max_steps):
        img = render_cart_pole(env, obs) # render the cart
        frames.append(img) # append to the animation frames
        action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)}) # feed the current observation to the model to get the predicted action
        obs, reward, done, info = env.step(action_val[0][0]) # apply the action and get the new observation
        if done:
            break
# don't forget to close the environment
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performed:
###Code
# play back the animation frames
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`): **The hard part of training a reinforcement learning model is producing correct target labels (the y values, the "game record").** We will come back to this problem in the next version; this version uses a very simple, direct labeling scheme: * when the pole tilts left, y is labeled as the accelerate-left action * when the pole tilts right, y is labeled as the accelerate-right action Fed this data, the model learns the corresponding behavior (accelerate left when the pole tilts left, right when it tilts right), though of course that is still not enough to keep the cart running for long
###Code
import tensorflow as tf
# reset the graph
reset_graph()
# graph parameters
n_inputs = 4 # 4 input neurons for the 4 observation variables
n_hidden = 4 # simple problem, simple network: 4 hidden neurons
n_outputs = 1 # modeled as binary classification with a single output
learning_rate = 0.01 # learning rate for gradient descent
# X is the current observation; y is the "game record" label for the current action
# (labeled accelerate-left when the pole tilts left, accelerate-right when it tilts right)
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
# build a 3-layer network
initializer = tf.variance_scaling_initializer() # initialize the weights from a (scaled) normal distribution
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer) # hidden layer
logits = tf.layers.dense(hidden, n_outputs) # logits layer, a single neuron; activation left as None so outputs can be computed by hand next
outputs = tf.nn.sigmoid(logits) # sigmoid maps the logits to a probability in [0, 1]: the probability that accelerating left is correct
# choose the action (accelerate left / right) by sampling from a multinomial distribution
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
# minimize the cross entropy between the logits and the label y
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
        # obs = [cart position (0.0 = center), velocity (>0 means rightward), pole angle (0.0 = vertical, >0 means tilted right), pole angular velocity (>0 means rightward)]
        # Label using the pole's tilt: Y=1 when the pole tilts left (target p(left)=1), Y=0 when it tilts right
# if angle<0 we want proba(left)=1., or else proba(left)=0.
target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations])
        # X is the batch of environment observations
        # y is the action label (accelerate-left when the pole tilts left, accelerate-right when it tilts right)
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
        # step each of the 10 environments with its sampled action
for env_index, env in enumerate(envs):
            # in each game environment, apply the action; reset the environment when its episode ends
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
    # save the session
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
# The animation shows that the model learned the behavior (accelerate left when the pole tilts left, right when it tilts right), but that is not enough to keep the cart running for long
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames)
plt.show()
###Output
WARNING:tensorflow:From /usr/local/lib/python3.5/dist-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.
Instructions for updating:
Use standard file APIs to check for files with this prefix.
INFO:tensorflow:Restoring parameters from ./my_policy_net_basic.ckpt
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients **Using Policy Gradients to train a model that keeps the cart running as long as possible** **The problem with ordinary supervised learning:** * ordinary supervised learning minimizes the cross entropy between the predicted probabilities and the target (label) probabilities * in reinforcement learning, the only guidance (labels) the agent gets comes through rewards, and rewards are usually sparse and delayed (e.g. the pole falls after 100 steps, and there is no telling which of those 100 actions made it fall) **Credit assignment tackles sparse and delayed rewards:** evaluate an action by the sum of the rewards that follow it, applying the discount rate r once more at each step (r is usually 0.95 or 0.99) * Example * at some point the agent decides to accelerate right three times in a row, with discount rate r = 0.8 * the rewards over those 3 steps are +10, 0 and −50 * the score of the decision at that point is then 10 + 0.8 × 0 + 0.8² × (−50) = −22 * r is usually 0.95 or 0.99 * with r = 0.95, a reward 13 steps away is discounted by about half (0.95¹³ ≈ 0.5) * with r = 0.99, a reward 69 steps away is discounted by about half (0.99⁶⁹ ≈ 0.5) **A good action may be executed right after many bad ones, and the pole may happen to fall, giving the good action a low score** * the fix is to run enough trials: play the game many times and normalize all action scores (subtract the mean and divide by the standard deviation) **This algorithm is simple yet very powerful, and can tackle far harder problems. In fact, AlphaGo was based on a similar PG algorithm (plus Monte Carlo tree search).** To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_. The _Policy Gradients_ algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
###Code
import tensorflow as tf
# reset the graph
reset_graph()
# model parameters
n_inputs = 4 # one per observation variable
n_hidden = 4 # simple problem: not many hidden neurons needed
n_outputs = 1 # modeled as binary classification
learning_rate = 0.01 # learning rate
initializer = tf.variance_scaling_initializer() # scaled-normal weight initialization, to fight vanishing and exploding gradients
# model graph: input layer X; hidden layer; output layer (logits, plus the action computed from the logits)
X = tf.placeholder(tf.float32, shape=[None, n_inputs]) # X is a batch of observations (shape [None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer) # hidden layer
logits = tf.layers.dense(hidden, n_outputs) # single-neuron logits layer, a linear function of the hidden layer; activation left as None and applied in the next step
outputs = tf.nn.sigmoid(logits) # sigmoid maps the logits to a probability in [0, 1]: the probability that accelerating left is correct
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs]) # [prob accelerate-left is correct, prob accelerate-right is correct]
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1) # sample the action from a multinomial distribution
# target probability (the y value): 1.0 when the sampled action is accelerate-left, 0.0 when it is accelerate-right
y = 1. - tf.to_float(action)
# cross entropy between the prediction (sigmoid over the logits) and the target
# compute_gradients() is used instead of minimize() so the gradients can be adjusted before they are applied
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
# grads_and_vars is a list of <grad, var> pairs: one gradient per model parameter
gradients = [grad for grad, variable in grads_and_vars]
# The path from gradients to gradient_placeholders is deliberately broken here:
# the actual values are computed at execution time from many games, using credit
# assignment plus mean-normalized rewards.
# Placeholders stand in for the gradients so that, when the graph is run (in the code further
# down), the gradients can be adjusted before being applied (multiplied by the action scores,
# normalized, and averaged)
gradient_placeholders = [] # format: grad[]
grads_and_vars_feed = [] # format: grad_and_var<grad[], variable>[]
for grad, variable in grads_and_vars:
    print(variable) # the variables' shapes, used later when the graph is run to compute the gradient feeds
    gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
    gradient_placeholders.append(gradient_placeholder)
    grads_and_vars_feed.append((gradient_placeholder, variable))
# When the graph is run, the loop above has gathered the gradients computed for the whole batch of X,
# so what gets applied is the rescaled mean gradient fed through the placeholders
training_op = optimizer.apply_gradients(grads_and_vars_feed)
# initialize global variables and the model saver
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# Helper functions used when running the graph
# Compute discounted totals: returns a float array the same length as rewards
# (working backwards: each step's score = its reward + discount_rate * the next step's score)
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
# 计算回报
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
# 测试discount_rewards函数
discount_rewards([10, 0, -50], discount_rate=0.8)
###Output
_____no_output_____
###Markdown
**discount_rewards**

input: rewards = [10, 0, -50]

output: discounted_rewards = [-22 = 10 + 0*0.8^1 - 50*0.8^2, -40 = 0 - 50*0.8^1, -50]
###Code
# Verify that discount_and_normalize_rewards does what we designed it to do
def discount_and_normalize_rewards_with_log(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
print("all_rewards:", all_rewards)
print("all_discounted_rewards:", all_discounted_rewards)
print("flat_rewards:", flat_rewards)
print("reward_mean:", reward_mean)
print("reward_std:", reward_std)
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_and_normalize_rewards_with_log([[10, 0, -50], [10, 20]], discount_rate=0.8)
###Output
all_rewards: [[10, 0, -50], [10, 20]]
all_discounted_rewards: [array([-22., -40., -50.]), array([26., 20.])]
flat_rewards: [-22. -40. -50. 26. 20.]
reward_mean: -13.2
reward_std: 30.947697814215523
###Markdown
The results above show that the first run was much worse than the second: every action in the first run is considered bad, while every action in the second run is considered good. discount_and_normalize_rewards returns what we expect.

**discount_and_normalize_rewards**
1. input rewards: a float[][]
2. discounted_rewards: every float[] in rewards converted to its discounted version (using the discount_rewards function above)
3. flat_rewards: the arrays concatenated into one, used to compute the mean (reward_mean) and standard deviation (reward_std)
4. every float in discounted_rewards is normalized (subtract the mean, then divide by the standard deviation)
###Code
# Execute the graph and train the model
# Initialize the env
env = gym.make("CartPole-v0")
# Training parameters
n_iterations = 250  # train for 250 iterations (the cart still tends to drift out of bounds; training for 750 iterations improves that)
n_games_per_update = 10  # after each iteration's 10 games (10 episodes), compute the scores with discount_and_normalize_rewards() and update the gradients
n_max_steps = 1000  # at most 1000 steps per game, so the model cannot run forever
save_iterations = 10  # save the model every 10 iterations (10*10 = 100 games)
discount_rate = 0.95  # discount rate: at 0.95 the discount at step 13 is about 0.5 (0.95^13 ≈ 0.5); at 0.99 it takes 69 steps
# Execute the graph
with tf.Session() as sess:
    init.run()
    # Run 250 training iterations
    for iteration in range(n_iterations):
        # Print the current iteration number
        print("\rIteration: {}".format(iteration), end="")
        # rewards and gradients across this iteration's games
        all_rewards = []  # layout: reward_float[game_idx][step_idx]
        all_gradients = []  # layout: gradients[game_idx][step_idx][variable_idx]
        # Run 10 games per training iteration
        for game in range(n_games_per_update):
            # rewards[] and gradients of a single game
            current_rewards = []
            current_gradients = []
            # At most 1000 steps per game, so a game cannot run forever
            obs = env.reset()
            for step in range(n_max_steps):
                # Evaluate the upper half of the graph (action and gradients): the action predicted from the current observation, plus the parameter gradients
                action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
                # Play the action in the game and update the observation
                obs, reward, done, info = env.step(action_val[0][0])
                # Store the current reward and gradients
                current_rewards.append(reward)
                current_gradients.append(gradients_val)
                # Game over
                if done:
                    break
            # Append this whole game's rewards[step_idx] and gradients[step_idx][variable_idx]
            all_rewards.append(current_rewards)
            all_gradients.append(current_gradients)
        # Compute the discounted, mean-centered, normalized rewards[game_idx][step_idx]
        all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
        feed_dict = {}
        for var_index, gradient_placeholder in enumerate(gradient_placeholders):
            # Compute one variable's gradient and assign it to the matching gradient_placeholder
            # The data fed to np.mean(*, axis=0) has shape data[game_idx][step_idx]; there is no var_idx axis because var_index is fixed inside this loop
            mean_input = np.array([reward * all_gradients[game_index][step][var_index]
                                   for game_index, rewards in enumerate(all_rewards)
                                   for step, reward in enumerate(rewards)])
            mean_gradients = np.mean(mean_input, axis=0)
            # print(mean_input.shape, mean_gradients.shape)  # np.mean(*, axis=0) averages over the first axis
            # (293, 4, 4) -> (4, 4)
            # (293, 4) -> (4,)
            # (293, 4, 1) -> (4, 1)
            # (293, 1) -> (1,)
            # print(variable)  # the shapes of the 4 variables printed earlier when the graph was built
            # <tf.Variable 'dense/kernel:0' shape=(4, 4) dtype=float32_ref>  # input-to-hidden connection weights
            # <tf.Variable 'dense/bias:0' shape=(4,) dtype=float32_ref>  # input-to-hidden bias weights
            # <tf.Variable 'dense_1/kernel:0' shape=(4, 1) dtype=float32_ref>  # hidden-to-output connection weights
            # <tf.Variable 'dense_1/bias:0' shape=(1,) dtype=float32_ref>  # hidden-to-output bias weights
            feed_dict[gradient_placeholder] = mean_gradients
        # Evaluate the training_op in the lower half of the graph to perform gradient descent
        sess.run(training_op, feed_dict=feed_dict)
        # Save the model every 10 iterations
        if iteration % save_iterations == 0:
            saver.save(sess, "./my_policy_net_pg.ckpt")
# Demonstrate what np.mean(raw, axis=0) does
raw = np.array([[[1,2,3,4],[5,6,7,8],[9,10,11,12]],[[1,2,3,4],[5,6,7,8],[9,10,11,12]]])
test = np.mean(np.array([raw,raw,raw,raw,raw]), axis=0)
print(np.array([raw,raw,raw,raw,raw]).shape, test.shape)
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames)
plt.show()
###Output
INFO:tensorflow:Restoring parameters from ./my_policy_net_pg.ckpt
###Markdown
Markov Chains

Representing a Markov chain with a (state-transition probability) matrix.
###Code
# State-transition probabilities (s is short for state)
transition_probabilities = [
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to s0, s1, s2, s3
[0.0, 1.0, 0.0, 0.0], # from s2 to s0, s1, s2, s3
[0.0, 0.0, 0.0, 1.0], # from s3 to s0, s1, s2, s3
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
        # Randomly pick the next state according to the transition probabilities
current_state = np.random.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process

Representing a Markov decision process with matrices.

A Markov decision process: state --(probability of taking each action)--> action --((state, action) leads to each new state with some probability)--> next state (plus the observed reward, i.e., the immediate reward).

The matrices:
1. transition (T) matrix: (current state, action, new state) -> transition probability
2. reward (R) matrix: (current state, action, new state) -> observed reward
3. possible-action lists: state -> list of allowed actions

Precondition: **using an MDP this way requires prior knowledge: the transition matrix and the reward matrix must already be known.**
###Code
# Transition (T) matrix: (current state, action, new state) -> transition probability
transition_probabilities = [
    # state s0 -> action a0 [to s0,s1,s2], action a1 [to s0,s1,s2], action a2 [to s0,s1,s2]
    # state s1 -> action a0 [to s0,s1,s2], action a1 [to s0,s1,s2], action a2 [to s0,s1,s2]
    # state s2 -> action a0 [to s0,s1,s2], action a1 [to s0,s1,s2], action a2 [to s0,s1,s2]
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
# Reward (R) matrix: (current state, action, new state) -> observed reward
rewards = [
    # state s0 -> action a0 [to s0,s1,s2], action a1 [to s0,s1,s2], action a2 [to s0,s1,s2]
    # state s1 -> action a0 [to s0,s1,s2], action a1 [to s0,s1,s2], action a2 [to s0,s1,s2]
    # state s2 -> action a0 [to s0,s1,s2], action a1 [to s0,s1,s2], action a2 [to s0,s1,s2]
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
# Possible actions: state -> list of allowed actions
possible_actions = [[0, 1, 2], [0, 2], [1]]
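# Quick illustrative check (not part of the original notebook): the expected immediate
# reward of playing action a0 in state s0 is the sum over s' of T[s0][a0][s'] * R[s0][a0][s']
expected_reward_s0_a0 = sum(p * r for p, r in zip(transition_probabilities[0][0], rewards[0][0]))
print(expected_reward_s0_a0)  # 0.7 * 10 = 7.0 (a 0.7 chance of staying in s0 and earning +10)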
# A policy that goes for the big +40 reward even though it risks the large -50 penalty
def policy_fire(state):
    # state is an array index
    # state=0 -> action=0
    # state=1 -> action=2
    # state=2 -> action=1
    return [0, 2, 1][state]
# A policy that picks actions at random
def policy_random(state):
    return np.random.choice(possible_actions[state])
# A conservative policy that avoids the -50 penalty
def policy_safe(state):
    return [0, 0, 1][state]
# Wrap the Markov decision process as an environment
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state = start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
        # Look up the probability table for <current state, action> and randomly draw the next state and its reward
        next_state = np.random.choice(range(3), p=transition_probabilities[self.state][action])
        reward = rewards[self.state][action][next_state]
        # Update state and total_rewards
        self.state = next_state
        self.total_rewards += reward
        # Return
        return self.state, reward
# Run one episode of the Markov decision process
def run_episode(policy, n_steps, start_state=0, display=True):
    env = MDPEnvironment()
    if display:
        # print output for the first 5 episodes
        print("States (+rewards):", end=" ")
    for step in range(n_steps):
        # print the first 10 steps of each episode
        if display:
            if step == 10:
                print("...", end=" ")
            elif step < 10:
                print(env.state, end=" ")
        # pick an action according to the policy
        action = policy(env.state)
        # look up the transition probability table to get the next state and the reward
        state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
    # Metrics of interest: mean reward, standard deviation, min reward, max reward
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning

The MDP approach above requires a precondition: prior knowledge, i.e., the state-transition matrix and the reward matrix must be known in advance. In practice, at the start of the game the agent knows neither the transition probabilities nor the rewards. It must experience each state and each transition at least once before it can start estimating them, and usually many times before those estimates become accurate.

**Temporal Difference Learning (TD)** lets the agent learn the state values while it explores the MDP (without being given the transition probabilities up front). Update rule:

$V_{k+1}(s) \gets (1 - \alpha) \, V_k(s) + \alpha \left( r + \gamma \cdot V_k(s') \right)$

where:
* $r$ is the reward the agent observed
* $\alpha$ is the learning-rate hyperparameter, starting fairly high and then gradually decreasing so the model converges

TD updates the state-value estimates from the states and rewards the agent observes. For each state s, TD simply keeps a running average of the immediate rewards the agent gets upon leaving that state, plus the rewards it expects to get later, **but it does not account for which action was taken, nor for which action yields the highest reward.**

**Q-learning builds on TD, refining "track each state" into "track each (state, action) pair":**
* replace $V_k(s)$ in the TD formula with $Q_k(s, a)$
* replace $V_k(s')$ with $\underset{a'}{\max} \, Q_k(s', a')$

Update rule:

$Q_{k+1}(s, a) \gets (1 - \alpha) \, Q_k(s, a) + \alpha \left( r + \gamma \cdot \underset{a'}{\max} \, Q_k(s', a') \right)$

For each (state, action) pair, the algorithm keeps a running average of the rewards the agent gets upon leaving state s via action a, plus the rewards it expects to get later. Since the target policy would act optimally, we take the maximum Q-Value estimate over the next state's actions.

**Neither TD nor Q-learning needs the transition or reward matrices in advance; they only need to know which actions are allowed in each state.**

**Given enough iterations, the Q-learning code below converges to the optimal Q-Values.**

Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate Q-Value estimates (or close enough), then the optimal policy consists in choosing the action that has the highest Q-Value (i.e., the greedy policy).
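As a side note, here is a minimal TD(0) sketch (my own illustration, reusing the `MDPEnvironment` and `policy_random` defined above) that estimates the state values $V(s)$ under the random policy without ever seeing the transition matrix:

```
import numpy as np

V = np.zeros(3)   # one value estimate per state
gamma = 0.99      # discount rate
alpha = 0.01      # learning rate (could also decay over time to help convergence)
td_env = MDPEnvironment()
for step in range(20000):
    s = td_env.state
    next_s, reward = td_env.step(policy_random(s))
    # TD(0) update: V(s) <- (1 - alpha) * V(s) + alpha * (r + gamma * V(s'))
    V[s] = (1 - alpha) * V[s] + alpha * (reward + gamma * V[next_s])
print(V)
```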
###Code
n_states = 3  # number of states
n_actions = 3  # number of actions
n_steps = 20000  # explore for 20000 steps
alpha = 0.01  # learning rate
discount = 0.99  # reward discount rate
# Exploration policy: pick actions at random
exploration_policy = policy_random
# Initialize the Q-Values (the estimated returns of each <state, action> pair):
# pairs not listed in possible_actions are set to -infinity, all others to 0
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
    q_values[state][actions] = 0
# Initialize the game environment
env = MDPEnvironment()
# Run 20000 steps
for step in range(n_steps):
    # Pick a random action among those allowed in the current state
    action = exploration_policy(env.state)
    state = env.state
    # Play the action: the MDP transitions to the next state and the agent observes a reward
    next_state, reward = env.step(action)
    # Look up the highest Q-Value among the next state's actions (greedy policy); all Q-Values start at 0
    next_value = np.max(q_values[next_state])
    # The agent observed a reward, so update the <state, action> Q-Value with the Q-learning formula
    q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + discount * next_value)
# After enough steps this converges (even the random exploration policy is enough to learn the q_values matrix)
q_values
# Try out the learned q_values matrix: pick actions according to the learned return estimates
def optimal_policy(state):
    return np.argmax(q_values[state])
# The test shows that picking actions from the q_values matrix yields high rewards
all_totals = []
for episode in range(1000):
    all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to Play MsPacman Using the DQN Algorithm **Warning**: Unfortunately, the first version of the book contained two important errors in this section.1. The actor DQN and critic DQN should have been named _online DQN_ and _target DQN_ respectively. Actor-critic algorithms are a distinct class of algorithms.2. The online DQN is the one that learns and is copied to the target DQN at regular intervals. The target DQN's only role is to estimate the next state's Q-Values for each possible action. This is needed to compute the target Q-Values for training the online DQN, as shown in this equation:$y(s,a) = \text{r} + \gamma . \underset{a'}{\max} \, Q_\text{target}(s', a')$* $y(s,a)$ is the target Q-Value to train the online DQN for the state-action pair $(s, a)$.* $r$ is the reward actually collected after playing action $a$ in state $s$.* $\gamma$ is the discount rate.* $s'$ is the state actually reached after played action $a$ in state $s$.* $a'$ is one of the possible actions in state $s'$.* $Q_\text{target}(s', a')$ is the target DQN's estimate of the Q-Value of playing action $a'$ while in state $s'$.I hope these errors did not affect you, and if they did, I sincerely apologize. Creating the MsPacman environment
###Code
# Load the MsPacman game
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
# There are 9 possible actions
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
# Preprocess the images: downsize to 88x80 pixels, convert to grayscale, improve the contrast, and normalize the gray levels to [-128, 127]
mspacman_color = 210 + 164 + 74
def preprocess_observation(obs):
img = obs[1:176:2, ::2] # crop and downsize
img = img.sum(axis=2) # to greyscale
img[img==mspacman_color] = 0 # Improve contrast
img = (img // 3 - 128).astype(np.int8) # normalize from -128 to 127
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
###Output
_____no_output_____
###Markdown
Note: the `preprocess_observation()` function is slightly different from the one in the book: instead of representing pixels as 64-bit floats from -1.0 to 1.0, it represents them as signed bytes (from -128 to 127). The benefit is that the replay memory will take up roughly 8 times less RAM (about 6.5 GB instead of 52 GB). The reduced precision has no visible impact on training.
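Back-of-the-envelope arithmetic behind that claim (my own, assuming each experience is dominated by its two int8 frames, `state` and `next_state`):

```
bytes_per_frame = 88 * 80 * 1   # int8 -> 1 byte per pixel
n_memories = 500000             # replay_memory_size used below
print(2 * bytes_per_frame * n_memories / 2**30, "GiB")  # ~6.5 GiB, vs ~52 GiB with float64
```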
###Code
# Show the effect of the preprocessing
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Build DQN (Deep Q Network)

-----

**The problems with Q-learning:**
* it has to maintain Q-Value estimates for a huge number of <state, action> pairs, which is computationally intractable for large (or even medium-sized) MDPs
* even a game like Ms. Pac-Man has about 250 pellets, each of which can be present or eaten, so the number of possible states exceeds $2^{250} \approx 10^{75}$, more than the number of atoms in the observable universe

**Approximate Q-learning:** the solution is to find a function that approximates the Q-Values using a manageable number of parameters; this is called approximate Q-learning.

**Traditional approximate Q-learning** estimates Q-Values from linear combinations of hand-crafted features extracted from the state (e.g., the distance and direction of the closest ghost, and so on).

**DQN (Deep Q Network):** DeepMind showed that a deep neural network works much better, especially for complex problems, and it does not require any feature engineering.

With a few simple tweaks, the code below can learn to play most Atari games quite well, reaching superhuman level on many of them (games with long-running storylines excepted).

-----

Note: instead of using `tf.contrib.layers.convolution2d()` or `tf.contrib.layers.conv2d()` (as in the first version of the book), we now use the `tf.layers.conv2d()`, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same, except that the parameter names have changed slightly:
* the `num_outputs` parameter was renamed to `filters`,
* the `stride` parameter was renamed to `strides`,
* the `_fn` suffix was removed from parameter names that had it (e.g., `activation_fn` was renamed to `activation`),
* the `weights_initializer` parameter was renamed to `kernel_initializer`,
* the weights variable was renamed to `"kernel"` (instead of `"weights"`), and the biases variable was renamed from `"biases"` to `"bias"`,
* and the default `activation` is now `None` instead of `tf.nn.relu`.
###Code
reset_graph()
# The environment observation is the Ms. Pac-Man game screen, downsized to 88x80 pixels, grayscale (single channel)
input_height = 88
input_width = 80
input_channels = 1
# Network parameters
# 1. Three convolutional layers
conv_n_maps = [32, 64, 64]  # number of feature maps per layer
conv_kernel_sizes = [(8,8), (4,4), (3,3)]  # receptive-field sizes of the kernels
conv_strides = [4, 2, 1]  # kernel strides
conv_paddings = ["SAME"] * 3  # receptive-field padding mode
conv_activation = [tf.nn.relu] * 3  # activation functions
# 2. Hidden layer (fully connected)
n_hidden_in = 64 * 11 * 10  # 64 feature maps * 88 (image height)/4 (stride)/2/1 * 80 (image width)/4/2/1 = 64 * 11 * 10 inputs
n_hidden = 512  # number of hidden neurons
hidden_activation = tf.nn.relu  # hidden-layer activation
# 3. Output layer (fully connected)
n_outputs = env.action_space.n  # 9 discrete values: 8 movement directions plus staying put
# 4. Parameter initialization (scaled normal distribution)
initializer = tf.variance_scaling_initializer()
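# Sanity check (illustration, not from the book): with "SAME" padding each conv layer
# outputs ceil(size / stride) pixels per dimension, so 88 -> 22 -> 11 -> 11 and 80 -> 20 -> 10 -> 10
h, w = input_height, input_width
for s in conv_strides:
    h, w = int(np.ceil(h / s)), int(np.ceil(w / s))
assert conv_n_maps[-1] * h * w == n_hidden_in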
# Function that builds a Q-network; it is used to create two DQNs: the online DQN (the "actor") and the target DQN (the "observer")
# * the target DQN estimates the Q-Values of the next state's actions, which are used to build the online DQN's training targets
# * the online DQN plays the game and is trained against those targets
def q_network(X_state, name):
    # X_state holds the raw image pixels
    # prev_layer: normalize the pixel values to [-1.0, 1.0] to help gradient descent
    prev_layer = X_state / 128.0
    # The target DQN (name="...target") and the online DQN (name="...online") use different variable scopes
    with tf.variable_scope(name) as scope:
        # Iterate over each convolutional layer's parameters
        for n_maps, kernel_size, strides, padding, activation in zip(
                conv_n_maps, conv_kernel_sizes, conv_strides, conv_paddings, conv_activation):
            # Create the convolutional layer
            prev_layer = tf.layers.conv2d(
                prev_layer,                       # the previous layer, used as this layer's input
                filters=n_maps,                   # number of feature maps
                kernel_size=kernel_size,          # receptive-field size
                strides=strides,                  # stride
                padding=padding,                  # padding mode
                activation=activation,            # activation function
                kernel_initializer=initializer    # parameter initialization
            )
        # Flatten the last conv layer (depth conv_n_maps[2]=64, one slice per feature map) into a 1D vector for the fully connected layer
        last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in])
        # Hidden layer (fully connected)
        hidden = tf.layers.dense(
            last_conv_layer_flat,             # input
            n_hidden,                         # number of neurons
            activation=hidden_activation,     # activation function
            kernel_initializer=initializer    # parameter initialization
        )
        # Output layer
        outputs = tf.layers.dense(hidden, n_outputs, kernel_initializer=initializer)
    # Collect all trainable variables into trainable_vars_by_name
    trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name)
    trainable_vars_by_name = {var.name[len(scope.name):]: var for var in trainable_vars}
    # Return the output layer, used to connect the rest of the graph,
    # and all trainable variables, used to copy parameters from the online DQN to the target DQN
    return outputs, trainable_vars_by_name
# Input placeholder, the online DQN (name="q_networks/online") and the target DQN (name="q_networks/target")
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width, input_channels])
online_q_values, online_vars = q_network(X_state, name="q_networks/online")  # online DQN
target_q_values, target_vars = q_network(X_state, name="q_networks/target")  # target DQN
# Operations that copy the online DQN's parameters to the target DQN
copy_ops = [target_var.assign(online_vars[var_name]) for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
# Print all model parameters
online_vars
tf.one_hot(2, 9)
# Graph that trains the online DQN's parameters (the error is computed from online_q_values)
learning_rate = 0.001  # learning rate
momentum = 0.95  # momentum optimizer parameter
with tf.variable_scope("train"):
    # Input placeholder (int): the action ids (both DQNs share the same input placeholder X_state)
    X_action = tf.placeholder(tf.int32, shape=[None])
    # Placeholder for the target Q-Values
    y = tf.placeholder(tf.float32, shape=[None, 1])
    # Q-Value from the online DQN:
    # one-hot-encode X_action (range [0, 8]) into a rank-1 tensor of length 9,
    # then multiply it by the online DQN's output Q-Values,
    # yielding a rank-1 tensor whose only non-zero entry is the Q-Value of the chosen action
    q_value = tf.reduce_sum(
        online_q_values * tf.one_hot(X_action, n_outputs),
        axis=1, keepdims=True)
    # Absolute error
    error = tf.abs(y - q_value)
    # Error part 1: the error clipped to [0.0, 1.0]
    clipped_error = tf.clip_by_value(error, 0.0, 1.0)
    # Error part 2: twice the clipped-off excess
    linear_error = 2 * (error - clipped_error)
    # Loss = part1^2 + part2
    loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
    # Create global_step, the momentum optimizer, and the loss-minimization op
    global_step = tf.Variable(0, trainable=False, name='global_step')
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
    training_op = optimizer.minimize(loss, global_step=global_step)
# Initialize the global variables and the Saver
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
WARNING:tensorflow:From /usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
###Markdown
Note: in the first version of the book, the loss function was simply the squared error between the target Q-Values (`y`) and the estimated Q-Values (`q_value`). However, because the experiences are very noisy, it is better to use a quadratic loss only for small errors (below 1.0) and a linear loss (twice the absolute error) for larger errors, which is what the code above computes. This way large errors don't push the model parameters around as much. Note that we also tweaked some hyperparameters (using a smaller learning rate, and using Nesterov Accelerated Gradients rather than Adam optimization, since adaptive gradient algorithms may sometimes be bad, according to this [paper](https://arxiv.org/abs/1705.08292)). We also tweaked a few other hyperparameters below (a larger replay memory, longer decay for the $\epsilon$-greedy policy, larger discount rate, less frequent copies of the online DQN to the target DQN, etc.). We use this `ReplayMemory` class instead of a `deque` because it is much faster for random access (thanks to @NileshPS who contributed it). Moreover, we default to sampling with replacement, which is much faster than sampling without replacement for large replay memories.
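A quick numeric check of the loss the training graph above computes (my own illustration): it is quadratic for absolute errors up to 1.0, then linear (2|e| - 1) beyond that:

```
import numpy as np

for e in (0.5, 1.0, 3.0):
    clipped = np.clip(e, 0.0, 1.0)
    print(e, clipped**2 + 2 * (e - clipped))  # -> 0.25, 1.0, 5.0
```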
###Code
# Replay memory
class ReplayMemory:
    def __init__(self, maxlen):
        self.maxlen = maxlen
        self.buf = np.empty(shape=maxlen, dtype=np.object)
        self.index = 0
        self.length = 0  # only affects the range that random sampling draws from
    # Append; once the buffer is full, new data overwrites the oldest samples
    def append(self, data):
        self.buf[self.index] = data
        self.length = min(self.length + 1, self.maxlen)
        self.index = (self.index + 1) % self.maxlen
    # Random sampling
    def sample(self, batch_size, with_replacement=True):
        if with_replacement:
            indices = np.random.randint(self.length, size=batch_size)  # faster
        else:
            indices = np.random.permutation(self.length)[:batch_size]
        return self.buf[indices]
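# Tiny usage demo (illustration only, not from the book): once maxlen is reached
# the buffer wraps around and overwrites the oldest entries
demo = ReplayMemory(maxlen=3)
for i in range(5):
    demo.append(i)
print(demo.buf)  # [3 4 2]: items 0 and 1 were overwritten by 3 and 4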
# Can store 500,000 experiences
replay_memory_size = 500000
replay_memory = ReplayMemory(replay_memory_size)
# Sample a random batch of memories
def sample_memories(batch_size):
    # the replay memory stores (state, action, reward, next_state, continue) tuples
cols = [[], [], [], [], []]
for memory in replay_memory.sample(batch_size):
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
# state, action, reward, next_state, continue
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
# Epsilon-greedy policy, used by the online DQN to explore the game
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 2000000
def epsilon_greedy(q_values, step):
    # as step grows, epsilon decreases linearly from 1.0 down to 0.1 over eps_decay_steps = 2,000,000 steps
    epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
    # as epsilon decreases, the online DQN gradually shifts from picking random actions to picking the action with the highest Q-Value (highest estimated return)
if np.random.rand() < epsilon:
return np.random.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
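# Illustration (not from the book): epsilon decays linearly from eps_max to eps_min
for s in (0, 1000000, 2000000, 4000000):
    print(s, max(eps_min, eps_max - (eps_max - eps_min) * s / eps_decay_steps))
# 0 -> 1.0, 1000000 -> 0.55, 2000000 -> 0.1, then it stays at 0.1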
# Parameters used by the main training loop
n_steps = 4000000 # total number of training steps
training_start = 10000 # start training after 10,000 game iterations
training_interval = 4 # run a training step every 4 game iterations
save_steps = 1000 # save the model every 1,000 training steps
copy_steps = 10000 # copy online DQN to target DQN every 10,000 training steps
discount_rate = 0.99
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0  # game iterations: counts the total game steps since training began
checkpoint_path = "./my_dqn.ckpt"
done = True  # env needs to be reset; tracks whether the current game is over
###Output
_____no_output_____
###Markdown
A few variables for tracking progress:
###Code
# Variables used to track the training progress
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
###Output
_____no_output_____
###Markdown
And now the main training loop!
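The loop below builds its training targets exactly as in the equation from the warning note above: $y = r + \gamma \cdot \underset{a'}{\max} \, Q_\text{target}(s', a')$. A tiny worked example with invented numbers (mine, for illustration only):

```
gamma = 0.99                     # discount rate
reward = 10.0                    # reward collected after playing a in s
next_q_target = [1.0, 3.0, 2.0]  # target DQN's estimates of Q(s', a'), one per action
y_example = reward + gamma * max(next_q_target)
print(y_example)  # 10 + 0.99 * 3.0 = 12.97
```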
###Code
# Main training loop
with tf.Session() as sess:
    # Resume training from the checkpoint if one exists; otherwise start from scratch
    if os.path.isfile(checkpoint_path + ".index"):
        saver.restore(sess, checkpoint_path)
    else:
        init.run()  # init = tf.global_variables_initializer()
        copy_online_to_target.run()  # copy_online_to_target = tf.group(*copy_ops)
    # Train the model
    while True:
        # step counts the training steps since training began; stop once it reaches the threshold
        step = global_step.eval()
        if step >= n_steps:
            break
        # iteration counts the game steps since training began
        iteration += 1
        # Print the current training progress
        print("\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}\tMean Max-Q {:5f}   ".format(
            iteration, step, n_steps, step * 100 / n_steps, loss_val, mean_max_q), end="")
        # If the current game is over (per the global done flag), start a new one
        if done:
            obs = env.reset()
            # The first 90 steps contribute little to training; play action 0 at each of them (Ms. Pac-Man just waits)
            for skip in range(skip_start):
                obs, reward, done, info = env.step(0)
            # Downsize the image and convert it to grayscale
            state = preprocess_observation(obs)
        # The online DQN estimates the Q-Values and decides what to do
        q_values = online_q_values.eval(feed_dict={X_state: [state]})
        action = epsilon_greedy(q_values, step)
        # The online DQN plays the action
        obs, reward, done, info = env.step(action)
        # The state after the action (downsized, grayscale image)
        next_state = preprocess_observation(obs)
        # Record the current transition in the replay memory
        replay_memory.append((state, action, reward, next_state, 1.0 - done))
        # Move on to the next state
        state = next_state
        # Compute statistics for tracking progress (not shown in the book)
        total_max_q += q_values.max()
        game_length += 1
        if done:
            mean_max_q = total_max_q / game_length
            total_max_q = 0.0
            game_length = 0
        # Only train after training_start = 10000 game iterations, and then only every training_interval = 4 iterations
        if iteration < training_start or iteration % training_interval != 0:
            continue  # only train after warmup period and at regular intervals
        # Sample a batch of game transitions that the online DQN stored in the replay memory
        X_state_val, X_action_val, rewards, X_next_state_val, continues = (sample_memories(batch_size))
        # Feed X_next_state_val to the target DQN, which estimates and returns its output-layer values (next_q_values)
        next_q_values = target_q_values.eval(feed_dict={X_state: X_next_state_val})
        # The highest Q-Value in the target DQN's output vector gives the estimated action and return for the following steps
        max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
        # Total return: immediate reward + 1_IF_NOT_GAME_OVER * discount rate * estimated future return
        y_val = rewards + continues * discount_rate * max_next_q_values
        # Update the online DQN's parameters
        _, loss_val = sess.run([training_op, loss], feed_dict={
            X_state: X_state_val, X_action: X_action_val, y: y_val})
        # Regularly copy the online DQN's parameters to the target DQN
        if step % copy_steps == 0:
            copy_online_to_target.run()
        # Regularly checkpoint the training state
        if step % save_steps == 0:
            saver.save(sess, checkpoint_path)
###Output
INFO:tensorflow:Restoring parameters from ./my_dqn.ckpt
Iteration 188272 Training step 618569/4000000 (15.5)% Loss 3.554193 Mean Max-Q 77.064900
###Markdown
You can interrupt the cell above at any time to test your agent using the cell below. You can then run the cell above once again, it will load the last parameters saved and resume training.
###Code
frames = []
n_max_steps = 10000
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
obs = env.reset()
for step in range(n_max_steps):
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = np.argmax(q_values)
# Online DQN plays
obs, reward, done, info = env.step(action)
img = env.render(mode="rgb_array")
frames.append(img)
if done:
break
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Extra material Preprocessing for Breakout Here is a preprocessing function you can use to train a DQN for the Breakout-v0 Atari game:
###Code
def preprocess_observation(obs):
img = obs[34:194:2, ::2] # crop and downsize
return np.mean(img, axis=2).reshape(80, 80) / 255.0
env = gym.make("Breakout-v0")
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (80×80 grayscale)")
plt.imshow(img, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, a single image does not give you the direction and speed of the ball, which is crucial information for playing this game. For this reason, it is best to actually combine several consecutive observations to create the environment's state representation. One way to do that is to create a multi-channel image, with one channel per recent observation. Another is to merge all recent observations into a single-channel image, using `np.max()`. In this case, we need to dim the older images so that the DQN can distinguish the past from the present.
###Code
from collections import deque
def combine_observations_multichannel(preprocessed_observations):
return np.array(preprocessed_observations).transpose([1, 2, 0])
def combine_observations_singlechannel(preprocessed_observations, dim_factor=0.5):
dimmed_observations = [obs * dim_factor**index
for index, obs in enumerate(reversed(preprocessed_observations))]
return np.max(np.array(dimmed_observations), axis=0)
n_observations_per_state = 3
preprocessed_observations = deque([], maxlen=n_observations_per_state)
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
preprocessed_observations.append(preprocess_observation(obs))
img1 = combine_observations_multichannel(preprocessed_observations)
img2 = combine_observations_singlechannel(preprocessed_observations)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Multichannel state")
plt.imshow(img1, interpolation="nearest")
plt.axis("off")
plt.subplot(122)
plt.title("Singlechannel state")
plt.imshow(img2, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Exercise solutions 1. to 7. See Appendix A. 8. BipedalWalker-v2 Exercise: _Use policy gradients to tackle OpenAI gym's "BipedalWalker-v2"._
###Code
import gym
env = gym.make("BipedalWalker-v2")
###Output
_____no_output_____
###Markdown
Note: if you run into [this issue](https://github.com/openai/gym/issues/100) ("`module 'Box2D._Box2D' has no attribute 'RAND_LIMIT'`") when making the `BipedalWalker-v2` environment, then try this workaround:```$ pip uninstall Box2D-kengz$ pip install git+https://github.com/pybox2d/pybox2d```
###Code
obs = env.reset()
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
obs
###Output
_____no_output_____
###Markdown
You can find the meaning of each of these 24 numbers in the [documentation](https://github.com/openai/gym/wiki/BipedalWalker-v2).
###Code
env.action_space
env.action_space.low
env.action_space.high
###Output
_____no_output_____
###Markdown
This is a 4D continuous action space controlling each leg's hip torque and knee torque (from -1 to 1). To deal with a continuous action space, one method is to discretize it. For example, let's limit the possible torque values to these 3 values: -1.0, 0.0, and 1.0. This means that we are left with $3^4=81$ possible actions.
###Code
from itertools import product
possible_torques = np.array([-1.0, 0.0, 1.0])
possible_actions = np.array(list(product(possible_torques, possible_torques, possible_torques, possible_torques)))
possible_actions.shape
tf.reset_default_graph()
# 1. Specify the network architecture
n_inputs = env.observation_space.shape[0] # == 24
n_hidden = 10
n_outputs = len(possible_actions) # == 81
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.selu,
kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
outputs = tf.nn.softmax(logits)
# 3. Select a random action based on the estimated probabilities
action_index = tf.squeeze(tf.multinomial(logits, num_samples=1), axis=-1)
# 4. Training
learning_rate = 0.01
y = tf.one_hot(action_index, depth=len(possible_actions))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Let's try running this policy network, although it is not trained yet.
###Code
def run_bipedal_walker(model_path=None, n_max_steps = 1000):
env = gym.make("BipedalWalker-v2")
frames = []
with tf.Session() as sess:
if model_path is None:
init.run()
else:
saver.restore(sess, model_path)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_index_val = action_index.eval(feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
if done:
break
env.close()
return frames
frames = run_bipedal_walker()
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Nope, it really can't walk. So let's train it!
###Code
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 1000
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}/{}".format(iteration + 1, n_iterations), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_index_val, gradients_val = sess.run([action_index, gradients],
feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_bipedal_walker_pg.ckpt")
frames = run_bipedal_walker("./my_bipedal_walker_pg.ckpt")
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
**Chapter 16 – Reinforcement Learning** This notebook contains all the sample code and solutions to the exercises in chapter 16. Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
###Code
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
import sys
# to make this notebook's output stable across runs
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# To plot pretty figures and animations
%matplotlib nbagg
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
Note: there may be minor differences between the output of this notebook and the examples shown in the book. You can safely ignore these differences. They are mainly due to the fact that most of the environments provided by OpenAI gym have some randomness. Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Next we will load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
[2017-09-25 11:35:03,438] Making new env: MsPacman-v0
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [width, height, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,4))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little helper function to plot an environment:
###Code
def plot_environment(env, figsize=(5,4)):
plt.close() # or else nbagg sometimes plots in the previous cell
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represents the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower left for 40 steps:
###Code
env.reset()
for step in range(110):
env.step(3) #left
for step in range(40):
env.step(8) #lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), by moving in random directions for 10 steps at a time, recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now show the animation (it's a bit jittery within Jupyter):
###Code
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
plt.close() # or else nbagg sometimes plots in the previous cell
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch), frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To code our first learning agent, we will be using a simpler environment: the Cart-Pole. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment... unfortunately we need to fix an annoying rendering issue first. Fixing the rendering issue Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify the `rgb_array` mode. In general you can safely ignore that window. However, if Jupyter is running on a headless server (ie. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb. You can start Jupyter using the `xvfb-run` command: $ xvfb-run -s "-screen 0 1400x900x24" jupyter notebookIf Jupyter is running on a headless server but you don't want to worry about Xvfb, then you can just use the following rendering function for the Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
from pyglet.gl import gl_info
openai_cart_pole_rendering = True # no problem, let's use OpenAI gym's rendering function
except Exception:
openai_cart_pole_rendering = False # probably no X server available, let's use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
# use OpenAI gym's rendering function
return env.render(mode="rgb_array")
else:
# rendering for the cart pole environment (in case OpenAI gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
cart_col = 0x000000 # Blue Green Red
pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
plt.show()
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
img.shape
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like it's doing what we're telling it to do. Now how can we make the pole remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! Neural Network Policies Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`. Note: instead of using the `fully_connected()` function from the `tensorflow.contrib.layers` module (as in the book), we now use the `dense()` function from the `tf.layers` module, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same.The main differences relevant to this chapter are:* the `_fn` suffix was removed in all the parameters that had it (for example the `activation_fn` parameter was renamed to `activation`).* the `weights` parameter was renamed to `kernel`,* the default activation is `None` instead of `tf.nn.relu`
###Code
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more than this
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.contrib.layers.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
###Output
_____no_output_____
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performed:
###Code
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`):
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames)
plt.show()
###Output
[2017-09-25 11:37:30,225] Making new env: CartPole-v0
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_.The _Policy Gradients_ algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
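# Note on the two-phase scheme above: compute_gradients() gives us gradient
# tensors we can *evaluate* during rollouts (as if the sampled action were the
# right one), while apply_gradients() is later fed reward-weighted averages of
# those stored gradients, via the placeholders, to perform the actual update.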
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
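# Hand check of the two calls above: discount_rewards([10, 0, -50], 0.8) works
# backwards through the rewards:
#   step 2: -50
#   step 1: 0 + 0.8 * (-50) = -40
#   step 0: 10 + 0.8 * (-40) = -22
# so it returns [-22., -40., -50.]. discount_and_normalize_rewards() then
# subtracts the mean and divides by the std of the discounted rewards pooled
# across all games, so actions in good games get positive scores and actions
# in bad games get negative ones.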
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames)
plt.show()
###Output
[2017-09-25 11:51:05,425] Making new env: CartPole-v0
###Markdown
Markov Chains
###Code
transition_probabilities = [
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to ...
[0.0, 1.0, 0.0, 0.0], # from s2 to ...
[0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = np.random.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
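# A quick sanity check (not shown in the book): since s3 is absorbing, raising
# the transition matrix to a high power should put most of the probability
# mass on s3.
P = np.array(transition_probabilities)
print(np.linalg.matrix_power(P, 50)[0])  # state distribution after 50 steps from s0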
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process
###Code
transition_probabilities = [
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return np.random.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
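# What these policies do: policy_fire picks a0 in s0 (+10 while staying in s0),
# a2 in s1 (pays -50 to reach s2), and a1 in s2 (80% chance of +40 back to s0).
# policy_safe picks a0 in s0 and s1; from s1 this loops forever with zero
# reward, avoiding the -50 penalty but also giving up the +40.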
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = np.random.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate Q-Value estimates (or close enough), then the optimal policy consists in choosing the action that has the highest Q-Value (i.e., the greedy policy).
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
    q_values[state][actions] = 0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
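# The update above is the temporal-difference rule:
#   Q(s, a) <- (1 - alpha) * Q(s, a) + alpha * (r + gamma * max_a' Q(s', a'))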
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to Play MsPacman Using the DQN Algorithm **Warning**: Unfortunately, the first version of the book contained two important errors in this section.1. The actor DQN and critic DQN should have been named _online DQN_ and _target DQN_ respectively. Actor-critic algorithms are a distinct class of algorithms.2. The online DQN is the one that learns and is copied to the target DQN at regular intervals. The target DQN's only role is to estimate the next state's Q-Values for each possible action. This is needed to compute the target Q-Values for training the online DQN, as shown in this equation:$y(s,a) = r + \gamma \cdot \underset{a'}{\max} \, Q_\text{target}(s', a')$* $y(s,a)$ is the target Q-Value to train the online DQN for the state-action pair $(s, a)$.* $r$ is the reward actually collected after playing action $a$ in state $s$.* $\gamma$ is the discount rate.* $s'$ is the state actually reached after playing action $a$ in state $s$.* $a'$ is one of the possible actions in state $s'$.* $Q_\text{target}(s', a')$ is the target DQN's estimate of the Q-Value of playing action $a'$ while in state $s'$.I hope these errors did not affect you, and if they did, I sincerely apologize. Creating the MsPacman environment
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
mspacman_color = np.array([210, 164, 74]).mean()
def preprocess_observation(obs):
img = obs[1:176:2, ::2] # crop and downsize
img = img.mean(axis=2) # to greyscale
img[img==mspacman_color] = 0 # Improve contrast
    img = (img - 128) / 128 # normalize from -1. to 1.
return img.reshape(88, 80, 1)
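# Shapes: obs is a 210x160x3 uint8 image; obs[1:176:2, ::2] keeps rows 1..175
# and every other pixel, giving 88x80, and the final reshape adds the single
# greyscale channel.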
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Build DQN Note: instead of using `tf.contrib.layers.convolution2d()` or `tf.contrib.layers.conv2d()` (as in the first version of the book), we now use the `tf.layers.conv2d()`, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same, except that the parameter names have changed slightly:* the `num_outputs` parameter was renamed to `filters`,* the `stride` parameter was renamed to `strides`,* the `_fn` suffix was removed from parameter names that had it (e.g., `activation_fn` was renamed to `activation`),* the `weights_initializer` parameter was renamed to `kernel_initializer`,* the weights variable was renamed to `"kernel"` (instead of `"weights"`), and the biases variable was renamed from `"biases"` to `"bias"`,* and the default `activation` is now `None` instead of `tf.nn.relu`.
###Code
reset_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden_in = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n # 9 discrete actions are available
initializer = tf.contrib.layers.variance_scaling_initializer()
def q_network(X_state, name):
prev_layer = X_state
with tf.variable_scope(name) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(
conv_n_maps, conv_kernel_sizes, conv_strides,
conv_paddings, conv_activation):
prev_layer = tf.layers.conv2d(
prev_layer, filters=n_maps, kernel_size=kernel_size,
strides=strides, padding=padding, activation=activation,
kernel_initializer=initializer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in])
hidden = tf.layers.dense(last_conv_layer_flat, n_hidden,
activation=hidden_activation,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width,
input_channels])
online_q_values, online_vars = q_network(X_state, name="q_networks/online")
target_q_values, target_vars = q_network(X_state, name="q_networks/target")
copy_ops = [target_var.assign(online_vars[var_name])
for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
online_vars
learning_rate = 0.001
momentum = 0.95
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
                            axis=1, keepdims=True)
error = tf.abs(y - q_value)
clipped_error = tf.clip_by_value(error, 0.0, 1.0)
linear_error = 2 * (error - clipped_error)
loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
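    # The three lines above build a Huber-style loss: quadratic for errors
    # below 1.0 and linear (slope 2) beyond, so the two pieces match in value
    # and slope at error = 1.0 and large errors produce bounded gradients.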
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Note: in the first version of the book, the loss function was simply the squared error between the target Q-Values (`y`) and the estimated Q-Values (`q_value`). However, because the experiences are very noisy, it is better to use a quadratic loss only for small errors (below 1.0) and a linear loss (twice the absolute error) for larger errors, which is what the code above computes. This way large errors don't push the model parameters around as much. Note that we also tweaked some hyperparameters (using a smaller learning rate, and using Nesterov Accelerated Gradients rather than Adam optimization, since adaptive gradient algorithms may sometimes be bad, according to this [paper](https://arxiv.org/abs/1705.08292)). We also tweaked a few other hyperparameters below (a larger replay memory, longer decay for the $\epsilon$-greedy policy, larger discount rate, less frequent copies of the online DQN to the target DQN, etc.).
###Code
from collections import deque
replay_memory_size = 500000
replay_memory = deque([], maxlen=replay_memory_size)
def sample_memories(batch_size):
indices = np.random.permutation(len(replay_memory))[:batch_size]
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for idx in indices:
memory = replay_memory[idx]
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 2000000
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if np.random.rand() < epsilon:
return np.random.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
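# The schedule is linear: epsilon is 1.0 at step 0, about 0.55 at step
# 1,000,000, and stays at the 0.1 floor from step 2,000,000 onwards.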
n_steps = 4000000 # total number of training steps
training_start = 10000 # start training after 10,000 game iterations
training_interval = 4 # run a training step every 4 game iterations
save_steps = 1000 # save the model every 1,000 training steps
copy_steps = 10000 # copy online DQN to target DQN every 10,000 training steps
discount_rate = 0.99
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "./my_dqn.ckpt"
done = True # env needs to be reset
###Output
_____no_output_____
###Markdown
A few variables for tracking progress:
###Code
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
###Output
_____no_output_____
###Markdown
And now the main training loop!
###Code
with tf.Session() as sess:
if os.path.isfile(checkpoint_path + ".index"):
saver.restore(sess, checkpoint_path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print("\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}\tMean Max-Q {:5f} ".format(
iteration, step, n_steps, step * 100 / n_steps, loss_val, mean_max_q), end="")
if done: # game over, start again
obs = env.reset()
for skip in range(skip_start): # skip the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# Online DQN plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
# Compute statistics for tracking progress (not shown in the book)
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
if iteration < training_start or iteration % training_interval != 0:
continue # only train after warmup period and at regular intervals
# Sample memories and use the target DQN to produce the target Q-Value
X_state_val, X_action_val, rewards, X_next_state_val, continues = (
sample_memories(batch_size))
next_q_values = target_q_values.eval(
feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
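        # This is the equation from the warning above:
        #   y(s, a) = r + gamma * max_a' Q_target(s', a')
        # with `continues` (0.0 for terminal transitions) zeroing out the
        # bootstrap term at the end of a game.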
# Train the online DQN
_, loss_val = sess.run([training_op, loss], feed_dict={
X_state: X_state_val, X_action: X_action_val, y: y_val})
# Regularly copy the online DQN to the target DQN
if step % copy_steps == 0:
copy_online_to_target.run()
# And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
INFO:tensorflow:Restoring parameters from ./my_dqn.ckpt
###Markdown
You can interrupt the cell above at any time to test your agent using the cell below. You can then run the cell above once again, it will load the last parameters saved and resume training.
###Code
frames = []
n_max_steps = 10000
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
obs = env.reset()
for step in range(n_max_steps):
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = np.argmax(q_values)
# Online DQN plays
obs, reward, done, info = env.step(action)
img = env.render(mode="rgb_array")
frames.append(img)
if done:
break
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Extra material Preprocessing for Breakout Here is a preprocessing function you can use to train a DQN for the Breakout-v0 Atari game:
###Code
def preprocess_observation(obs):
img = obs[34:194:2, ::2] # crop and downsize
return np.mean(img, axis=2).reshape(80, 80) / 255.0
env = gym.make("Breakout-v0")
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (80×80 grayscale)")
plt.imshow(img, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, a single image does not give you the direction and speed of the ball, which is crucial information for playing this game. For this reason, it is best to combine several consecutive observations to create the environment's state representation. One way to do that is to create a multi-channel image, with one channel per recent observation. Another is to merge all recent observations into a single-channel image, using `np.max()`. In this case, we need to dim the older images so that the DQN can distinguish the past from the present.
###Code
def combine_observations_multichannel(preprocessed_observations):
return np.array(preprocessed_observations).transpose([1, 2, 0])
def combine_observations_singlechannel(preprocessed_observations, dim_factor=0.5):
dimmed_observations = [obs * dim_factor**index
for index, obs in enumerate(reversed(preprocessed_observations))]
return np.max(np.array(dimmed_observations), axis=0)
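# With dim_factor=0.5, the most recent frame keeps full intensity, the previous
# one is halved, the one before that quartered, and so on: brightness encodes
# recency in the merged single-channel state.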
n_observations_per_state = 3
preprocessed_observations = deque([], maxlen=n_observations_per_state)
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
preprocessed_observations.append(preprocess_observation(obs))
img1 = combine_observations_multichannel(preprocessed_observations)
img2 = combine_observations_singlechannel(preprocessed_observations)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Multichannel state")
plt.imshow(img1, interpolation="nearest")
plt.axis("off")
plt.subplot(122)
plt.title("Singlechannel state")
plt.imshow(img2, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
**Chapter 16 – Reinforcement Learning** This notebook contains all the sample code and solutions to the exercises in chapter 16. **Warning**: this is the code for the 1st edition of the book. Please visit https://github.com/ageron/handson-ml2 for the 2nd edition code, with up-to-date notebooks using the latest library versions. In particular, the 1st edition is based on TensorFlow 1, while the 2nd edition uses TensorFlow 2, which is much simpler to use. Setup First, let's make sure this notebook works well in both Python 2 and 3, import a few common modules, ensure Matplotlib plots figures inline and prepare a function to save the figures:
###Code
import numpy as np
import os
import sklearn
import sys
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 1.x
!apt update && apt install -y libpq-dev libsdl2-dev swig xorg-dev xvfb
!pip install -q -U pyvirtualdisplay gym[atari,box2d]
IS_COLAB = True
except Exception:
IS_COLAB = False
# to make this notebook's output stable across runs
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# To get smooth animations
import matplotlib.animation as animation
mpl.rc('animation', html='jshtml')
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
###Output
_____no_output_____
###Markdown
Note: there may be minor differences between the output of this notebook and the examples shown in the book. You can safely ignore these differences. They are mainly due to the fact that most of the environments provided by OpenAI gym have some randomness. Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Next we will load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
_____no_output_____
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
env.seed(42)
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [width, height, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). **Warning**: some environments require access to your display, which opens up a separate window, even if you specify `mode="rgb_array"`. In general you can safely ignore that window. However, if Jupyter is running on a headless server (i.e. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like [Xvfb](http://en.wikipedia.org/wiki/Xvfb). On Debian or Ubuntu:```bash$ apt update$ apt install -y xvfb```You can then start Jupyter using the `xvfb-run` command:```bash$ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook```Alternatively, you can install the [pyvirtualdisplay](https://github.com/ponty/pyvirtualdisplay) Python library which wraps Xvfb:```bashpython3 -m pip install -U pyvirtualdisplay```And run the following code:
###Code
try:
import pyvirtualdisplay
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
except ImportError:
pass
env.render()
###Output
_____no_output_____
###Markdown
In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
img.shape
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,4))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
Saving figure MsPacman
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little helper function to plot an environment:
###Code
def plot_environment(env, figsize=(5,4)):
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represent the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower-left for 40 steps:
###Code
env.seed(42)
env.reset()
for step in range(110):
env.step(3) #left
for step in range(40):
env.step(8) #lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
plt.show()
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives) by moving in random directions for 10 steps at a time, recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
env.seed(42)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now show the animation:
###Code
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
anim = animation.FuncAnimation(
fig, update_scene, fargs=(frames, patch),
frames=len(frames), repeat=repeat, interval=interval)
plt.close()
return anim
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To code our first learning agent, we will be using a simpler environment: the Cart-Pole. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
env.seed(42)
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment...
###Code
plot_environment(env)
plt.show()
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
env.seed(42)
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
plot_environment(env)
save_fig("cart_pole_plot")
img.shape
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to the right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_environment(env)
plt.show()
###Output
_____no_output_____
###Markdown
Looks like it's doing what we're telling it to do. Now how can we make the pole remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard-code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
env.seed(42)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! Neural Network Policies Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`. Note: instead of using the `fully_connected()` function from the `tensorflow.contrib.layers` module (as in the book), we now use the `dense()` function from the `tf.layers` module, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same.The main differences relevant to this chapter are:* the `_fn` suffix was removed in all the parameters that had it (for example the `activation_fn` parameter was renamed to `activation`).* the `weights` parameter was renamed to `kernel`,* the default activation is `None` instead of `tf.nn.relu`
###Code
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more than this
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
###Output
WARNING:tensorflow:From <ipython-input-36-e360db0650cb>:12: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.
Instructions for updating:
Use keras.layers.Dense instead.
WARNING:tensorflow:From /Users/ageron/miniconda3/envs/tf1/lib/python3.7/site-packages/tensorflow_core/python/layers/core.py:187: Layer.apply (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.
Instructions for updating:
Please use `layer.__call__` method instead.
WARNING:tensorflow:From <ipython-input-36-e360db0650cb>:18: multinomial (from tensorflow.python.ops.random_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.random.categorical` instead.
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
env.seed(42)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performed:
###Code
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`):
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
WARNING:tensorflow:From /Users/ageron/miniconda3/envs/tf1/lib/python3.7/site-packages/tensorflow_core/python/ops/nn_impl.py:183: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
###Markdown
We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps=1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_.The _Policy Gradients_ algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Markov Chains
###Code
transition_probabilities = [
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to ...
[0.0, 1.0, 0.0, 0.0], # from s2 to ...
[0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = np.random.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process
###Code
transition_probabilities = [
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return np.random.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = np.random.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate Q-Value estimates (or close enough), then the optimal policy consists in choosing the action that has the highest Q-Value (i.e., the greedy policy).
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
    q_values[state][actions] = 0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to Play MsPacman Using the DQN Algorithm **Warning**: Unfortunately, the first version of the book contained two important errors in this section.1. The actor DQN and critic DQN should have been named _online DQN_ and _target DQN_ respectively. Actor-critic algorithms are a distinct class of algorithms.2. The online DQN is the one that learns and is copied to the target DQN at regular intervals. The target DQN's only role is to estimate the next state's Q-Values for each possible action. This is needed to compute the target Q-Values for training the online DQN, as shown in this equation:$y(s,a) = r + \gamma \cdot \underset{a'}{\max} \, Q_\text{target}(s', a')$* $y(s,a)$ is the target Q-Value to train the online DQN for the state-action pair $(s, a)$.* $r$ is the reward actually collected after playing action $a$ in state $s$.* $\gamma$ is the discount rate.* $s'$ is the state actually reached after playing action $a$ in state $s$.* $a'$ is one of the possible actions in state $s'$.* $Q_\text{target}(s', a')$ is the target DQN's estimate of the Q-Value of playing action $a'$ while in state $s'$.I hope these errors did not affect you, and if they did, I sincerely apologize. Creating the MsPacman environment
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
mspacman_color = 210 + 164 + 74
def preprocess_observation(obs):
img = obs[1:176:2, ::2] # crop and downsize
img = img.sum(axis=2) # to greyscale
img[img==mspacman_color] = 0 # Improve contrast
img = (img // 3 - 128).astype(np.int8) # normalize from -128 to 127
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
###Output
_____no_output_____
###Markdown
Note: the `preprocess_observation()` function is slightly different from the one in the book: instead of representing pixels as 64-bit floats from -1.0 to 1.0, it represents them as signed bytes (from -128 to 127). The benefit is that the replay memory will take up roughly 8 times less RAM (about 6.5 GB instead of 52 GB). The reduced precision has no visible impact on training.
###Code
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Build DQN Note: instead of using `tf.contrib.layers.convolution2d()` or `tf.contrib.layers.conv2d()` (as in the first version of the book), we now use the `tf.layers.conv2d()`, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same, except that the parameter names have changed slightly:* the `num_outputs` parameter was renamed to `filters`,* the `stride` parameter was renamed to `strides`,* the `_fn` suffix was removed from parameter names that had it (e.g., `activation_fn` was renamed to `activation`),* the `weights_initializer` parameter was renamed to `kernel_initializer`,* the weights variable was renamed to `"kernel"` (instead of `"weights"`), and the biases variable was renamed from `"biases"` to `"bias"`,* and the default `activation` is now `None` instead of `tf.nn.relu`.
###Code
reset_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden_in = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n # 9 discrete actions are available
initializer = tf.variance_scaling_initializer()
def q_network(X_state, name):
prev_layer = X_state / 128.0 # scale pixel intensities to the [-1.0, 1.0] range.
with tf.variable_scope(name) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(
conv_n_maps, conv_kernel_sizes, conv_strides,
conv_paddings, conv_activation):
prev_layer = tf.layers.conv2d(
prev_layer, filters=n_maps, kernel_size=kernel_size,
strides=strides, padding=padding, activation=activation,
kernel_initializer=initializer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in])
hidden = tf.layers.dense(last_conv_layer_flat, n_hidden,
activation=hidden_activation,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width,
input_channels])
online_q_values, online_vars = q_network(X_state, name="q_networks/online")
target_q_values, target_vars = q_network(X_state, name="q_networks/target")
copy_ops = [target_var.assign(online_vars[var_name])
for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
online_vars
learning_rate = 0.001
momentum = 0.95
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keepdims=True)
error = tf.abs(y - q_value)
clipped_error = tf.clip_by_value(error, 0.0, 1.0)
linear_error = 2 * (error - clipped_error)
loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Note: in the first version of the book, the loss function was simply the squared error between the target Q-Values (`y`) and the estimated Q-Values (`q_value`). However, because the experiences are very noisy, it is better to use a quadratic loss only for small errors (below 1.0) and a linear loss (twice the absolute error) for larger errors, which is what the code above computes (the piecewise form is written out below). This way large errors don't push the model parameters around as much. Note that we also tweaked some hyperparameters (using a smaller learning rate, and using Nesterov Accelerated Gradients rather than Adam optimization, since adaptive gradient algorithms can sometimes generalize poorly, according to this [paper](https://arxiv.org/abs/1705.08292)). We also tweaked a few other hyperparameters below (a larger replay memory, longer decay for the $\epsilon$-greedy policy, larger discount rate, less frequent copies of the online DQN to the target DQN, etc.). We use this `ReplayMemory` class instead of a `deque` because it is much faster for random access (thanks to @NileshPS who contributed it). Moreover, we default to sampling with replacement, which is much faster than sampling without replacement for large replay memories.
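For reference, writing $e = |y - q|$, the loss computed above works out to the piecewise form $$\mathcal{L}(e) = \begin{cases} e^2 & \text{if } e \le 1,\\ 2e - 1 & \text{otherwise,} \end{cases}$$ which is quadratic for small errors and linear for large ones, with matching value and slope at $e = 1$ (a scaled version of the Huber loss).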
###Code
class ReplayMemory:
def __init__(self, maxlen):
self.maxlen = maxlen
self.buf = np.empty(shape=maxlen, dtype=np.object)
self.index = 0
self.length = 0
def append(self, data):
self.buf[self.index] = data
self.length = min(self.length + 1, self.maxlen)
self.index = (self.index + 1) % self.maxlen
def sample(self, batch_size, with_replacement=True):
if with_replacement:
indices = np.random.randint(self.length, size=batch_size) # faster
else:
indices = np.random.permutation(self.length)[:batch_size]
return self.buf[indices]
replay_memory_size = 500000
replay_memory = ReplayMemory(replay_memory_size)
def sample_memories(batch_size):
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for memory in replay_memory.sample(batch_size):
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 2000000
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if np.random.rand() < epsilon:
return np.random.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
n_steps = 4000000 # total number of training steps
training_start = 10000 # start training after 10,000 game iterations
training_interval = 4 # run a training step every 4 game iterations
save_steps = 1000 # save the model every 1,000 training steps
copy_steps = 10000 # copy online DQN to target DQN every 10,000 training steps
discount_rate = 0.99
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "./my_dqn.ckpt"
done = True # env needs to be reset
###Output
_____no_output_____
###Markdown
A few variables for tracking progress:
###Code
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
###Output
_____no_output_____
###Markdown
And now the main training loop!
###Code
with tf.Session() as sess:
if os.path.isfile(checkpoint_path + ".index"):
saver.restore(sess, checkpoint_path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print("\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}\tMean Max-Q {:5f} ".format(
iteration, step, n_steps, step * 100 / n_steps, loss_val, mean_max_q), end="")
if done: # game over, start again
obs = env.reset()
for skip in range(skip_start): # skip the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# Online DQN plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
# Compute statistics for tracking progress (not shown in the book)
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
if iteration < training_start or iteration % training_interval != 0:
continue # only train after warmup period and at regular intervals
# Sample memories and use the target DQN to produce the target Q-Value
X_state_val, X_action_val, rewards, X_next_state_val, continues = (
sample_memories(batch_size))
next_q_values = target_q_values.eval(
feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
# Train the online DQN
_, loss_val = sess.run([training_op, loss], feed_dict={
X_state: X_state_val, X_action: X_action_val, y: y_val})
# Regularly copy the online DQN to the target DQN
if step % copy_steps == 0:
copy_online_to_target.run()
# And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
INFO:tensorflow:Restoring parameters from ./my_dqn.ckpt
###Markdown
You can interrupt the cell above at any time to test your agent using the cell below. You can then run the cell above once again; it will load the last parameters saved and resume training.
###Code
frames = []
n_max_steps = 10000
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
obs = env.reset()
for step in range(n_max_steps):
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = np.argmax(q_values)
# Online DQN plays
obs, reward, done, info = env.step(action)
img = env.render(mode="rgb_array")
frames.append(img)
if done:
break
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Extra material Preprocessing for Breakout Here is a preprocessing function you can use to train a DQN for the Breakout-v0 Atari game:
###Code
def preprocess_observation(obs):
img = obs[34:194:2, ::2] # crop and downsize
return np.mean(img, axis=2).reshape(80, 80) / 255.0
env = gym.make("Breakout-v0")
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (80×80 grayscale)")
plt.imshow(img, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, a single image does not give you the direction and speed of the ball, which are crucial information for playing this game. For this reason, it is best to actually combine several consecutive observations to create the environment's state representation. One way to do that is to create a multi-channel image, with one channel per recent observation. Another is to merge all recent observations into a single-channel image, using `np.max()`. In this case, we need to dim the older images so that the DQN can distinguish the past from the present.
###Code
from collections import deque
def combine_observations_multichannel(preprocessed_observations):
return np.array(preprocessed_observations).transpose([1, 2, 0])
def combine_observations_singlechannel(preprocessed_observations, dim_factor=0.5):
dimmed_observations = [obs * dim_factor**index
for index, obs in enumerate(reversed(preprocessed_observations))]
return np.max(np.array(dimmed_observations), axis=0)
n_observations_per_state = 3
preprocessed_observations = deque([], maxlen=n_observations_per_state)
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
preprocessed_observations.append(preprocess_observation(obs))
img1 = combine_observations_multichannel(preprocessed_observations)
img2 = combine_observations_singlechannel(preprocessed_observations)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Multichannel state")
plt.imshow(img1, interpolation="nearest")
plt.axis("off")
plt.subplot(122)
plt.title("Singlechannel state")
plt.imshow(img2, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Exercise solutions 1. to 7. See Appendix A. 8. BipedalWalker-v3 Exercise: _Use policy gradients to tackle OpenAI gym's "BipedalWalker-v3"._
###Code
import gym
env = gym.make("BipedalWalker-v3")
obs = env.reset()
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
obs
###Output
_____no_output_____
###Markdown
You can find the meaning of each of these 24 numbers in the [documentation](https://github.com/openai/gym/wiki/BipedalWalker-v3).
###Code
env.action_space
env.action_space.low
env.action_space.high
###Output
_____no_output_____
###Markdown
This is a 4D continuous action space controlling each leg's hip torque and knee torque (from -1 to 1). To deal with a continuous action space, one method is to discretize it. For example, let's limit the possible torque values to these 3 values: -1.0, 0.0, and 1.0. This means that we are left with $3^4=81$ possible actions.
###Code
from itertools import product
possible_torques = np.array([-1.0, 0.0, 1.0])
possible_actions = np.array(list(product(possible_torques, possible_torques, possible_torques, possible_torques)))
possible_actions.shape
tf.reset_default_graph()
# 1. Specify the network architecture
n_inputs = env.observation_space.shape[0] # == 24
n_hidden = 10
n_outputs = len(possible_actions) # == 81
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.selu,
kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
outputs = tf.nn.softmax(logits)
# 3. Select a random action based on the estimated probabilities
action_index = tf.squeeze(tf.multinomial(logits, num_samples=1), axis=-1)
# 4. Training
learning_rate = 0.01
y = tf.one_hot(action_index, depth=len(possible_actions))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Let's try running this policy network, although it is not trained yet.
###Code
def run_bipedal_walker(model_path=None, n_max_steps = 1000):
env = gym.make("BipedalWalker-v3")
frames = []
with tf.Session() as sess:
if model_path is None:
init.run()
else:
saver.restore(sess, model_path)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_index_val = action_index.eval(feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
if done:
break
env.close()
return frames
frames = run_bipedal_walker()
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Nope, it really can't walk. So let's train it!
###Code
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 1000
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}/{}".format(iteration + 1, n_iterations), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_index_val, gradients_val = sess.run([action_index, gradients],
feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_bipedal_walker_pg.ckpt")
frames = run_bipedal_walker("./my_bipedal_walker_pg.ckpt")
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Not the best walker, but at least it stays up and makes (slow) progress to the right. A better solution for this problem is to use an actor-critic algorithm, as it does not require discretizing the action space, and it converges much faster. Check out this nice [blog post](https://towardsdatascience.com/reinforcement-learning-w-keras-openai-actor-critic-models-f084612cfd69) by Yash Patel for more details; a minimal sketch of the idea follows below.
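To make that concrete, here is a minimal one-step advantage actor-critic sketch in the same TF 1.x style as the rest of this notebook. It is only an illustrative sketch, not the blog post's implementation: the hidden layer size, the 0.5 critic-loss weight, and the names (`X_ac`, `logits_ac`, `returns`, and so on) are assumptions made for this example, and it reuses `n_inputs`, `initializer`, and the 81 discretized `possible_actions` defined above.
###Code
# Hedged sketch: a shared trunk feeding an actor head (action logits) and a
# critic head (state-value estimate). The actor is pushed in the direction of
# log pi(a|s) weighted by the advantage; the critic regresses toward the return.
X_ac = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden_ac = tf.layers.dense(X_ac, 64, activation=tf.nn.relu,
                            kernel_initializer=initializer)
logits_ac = tf.layers.dense(hidden_ac, len(possible_actions))  # actor head
state_value = tf.layers.dense(hidden_ac, 1)                    # critic head
action_ac = tf.squeeze(tf.multinomial(logits_ac, num_samples=1), axis=-1)
returns = tf.placeholder(tf.float32, shape=[None])   # observed discounted returns
actions_taken = tf.placeholder(tf.int32, shape=[None])
advantage = returns - tf.squeeze(state_value, axis=-1)
# log pi(a|s) is minus the sparse softmax cross-entropy
log_prob = -tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=actions_taken, logits=logits_ac)
actor_loss = -tf.reduce_mean(log_prob * tf.stop_gradient(advantage))
critic_loss = tf.reduce_mean(tf.square(advantage))
ac_training_op = tf.train.AdamOptimizer(1e-3).minimize(
    actor_loss + 0.5 * critic_loss)
###Output
_____no_output_____
###Markdown
At each update you would feed a batch of observed states, the actions taken, and their discounted returns; the same two-headed structure also extends to a Gaussian policy over the raw torques, which avoids discretizing the action space altogether. 9. Pong DQN Let's explore the `Pong-v0` OpenAI Gym environment.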
###Code
import gym
env = gym.make('Pong-v0')
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
We see the observation space is a 210x160 RGB image. The action space is a `Discrete(6)` space with 6 different actions: actions 0 and 1 do nothing, actions 2 and 4 move the paddle up, and finally actions 3 and 5 move the paddle down. The paddle is free to move immediately but the ball does not appear until after 18 steps into the episode.Let's play a game with a completely random policy and plot the resulting animation.
###Code
# A helper function to run an episode of Pong. Its first argument should be a
# function which takes the observation of the environment and the current
# iteration and produces an action for the agent to take.
def run_episode(policy, n_max_steps=1000, frames_per_action=1):
obs = env.reset()
frames = []
for i in range(n_max_steps):
obs, reward, done, info = env.step(policy(obs, i))
frames.append(env.render(mode='rgb_array'))
if done:
break
return plot_animation(frames)
run_episode(lambda obs, i: np.random.randint(0, 5))
###Output
_____no_output_____
###Markdown
The random policy does not fare very well. So let's try to use a DQN and see if we can do better. First let's write a preprocessing function to scale down the input state. Since a single observation does not tell us about the ball's velocity, we will also need to combine multiple observations into a single state. Below is the preprocessing code for this environment. The preprocessing is two-fold:1. Convert the observation image to black and white and scale it down to 80x80 pixels.2. Combine 3 observations into a single state which depicts the velocity of the paddles and the ball.
###Code
green_paddle_color = (92, 186, 92)
red_paddle_color = (213, 130, 74)
background_color = (144, 72, 17)
ball_color = (236, 236, 236)
def preprocess_observation(obs):
img = obs[34:194:2, ::2].reshape(-1, 3)
tmp = np.full(shape=(80 * 80), fill_value=0.0, dtype=np.float32)
for i, c in enumerate(img):
c = tuple(c)
if c in {green_paddle_color, red_paddle_color, ball_color}:
tmp[i] = 1.0
else:
tmp[i] = 0.0
return tmp.reshape(80, 80)
obs = env.reset()
for _ in range(25):
obs, _, _, _ = env.step(0)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title('Original Observation (160 x 210 RGB)')
plt.imshow(obs)
plt.axis('off')
plt.subplot(122)
plt.title('Preprocessed Observation (80 x 80 Grayscale)')
plt.imshow(preprocess_observation(obs), interpolation='nearest', cmap='gray')
plt.axis('off')
plt.show()
def combine_observations(preprocess_observations, dim_factor=0.75):
dimmed = [obs * (dim_factor ** idx)
for idx, obs in enumerate(reversed(preprocess_observations))]
return np.max(np.array(dimmed), axis=0)
n_observations_per_state = 3
obs = env.reset()
for _ in range(20):
obs, _, _, _ = env.step(0)
preprocess_observations = []
for _ in range(n_observations_per_state):
obs, _, _, _ = env.step(2)
preprocess_observations.append(preprocess_observation(obs))
img = combine_observations(preprocess_observations)
plt.figure(figsize=(6, 6))
plt.title('Combined Observations as a Single State')
plt.imshow(img, interpolation='nearest', cmap='gray')
plt.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
Now we are going to build the DQN. Like the DQN for Pac-Man, this model will train 3 convolutional layers, then a hidden fully connected layer, and finally a fully connected output layer with 6 neurons, one for each possible action.
###Code
reset_graph()
input_width = 80
input_height = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [9, 5, 3]
conv_kernel_strides = [4, 2, 1]
conv_paddings = ['VALID'] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden_in = 5 * 5 * 64
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n
he_init = tf.contrib.layers.variance_scaling_initializer()
###Output
_____no_output_____
###Markdown
This model will use two DQNs, an online DQN and a target DQN. The online DQN learns new parameters at each training step. The target DQN is used to compute the target Q-Values for the online DQN's loss function during training. The online DQN's parameters are copied to the target DQN at regular intervals.
###Code
def q_network(X_state, name):
prev_layer = X_state
with tf.variable_scope(name) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(
conv_n_maps, conv_kernel_sizes, conv_kernel_strides, conv_paddings,
conv_activation):
prev_layer = tf.layers.conv2d(prev_layer, filters=n_maps,
kernel_size=kernel_size,
strides=strides, padding=padding,
activation=activation,
kernel_initializer=he_init)
flattened = tf.reshape(prev_layer, [-1, n_hidden_in])
hidden = tf.layers.dense(flattened, n_hidden,
activation=hidden_activation,
kernel_initializer=he_init)
outputs = tf.layers.dense(hidden, n_outputs, kernel_initializer=he_init)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
# Starting the DQN definition.
X_state = tf.placeholder(tf.float32, shape=(None, input_height, input_width,
input_channels))
online_q_values, online_vars = q_network(X_state, 'q_networks/online')
target_q_values, target_vars = q_network(X_state, 'q_networks/target')
copy_ops = [var.assign(online_vars[name]) for name, var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
# Defining the training objective.
learning_rate = 1e-3
momentum = 0.95
with tf.variable_scope('training') as scope:
X_action = tf.placeholder(tf.int32, shape=(None,))
y = tf.placeholder(tf.float32, shape=(None, 1))
Q_target = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keepdims=True)
error = tf.abs(y - Q_target)
loss = tf.reduce_mean(tf.square(error))
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum,
use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
This model will sample past experiences from a _Replay Memory_; this should help the model learn which higher-level patterns to pay attention to in order to find the right action. It also reduces the chance that the model's behavior becomes too correlated with its most recent experiences. The replay memory will store its data in the kernel's memory.
###Code
class ReplayMemory(object):
def __init__(self, maxlen):
self.maxlen = maxlen
self.buf = np.empty(shape=maxlen, dtype=np.object)
self.index = 0
self.length = 0
def append(self, data):
self.buf[self.index] = data
self.index += 1
self.index %= self.maxlen
self.length = min(self.length + 1, self.maxlen)
def sample(self, batch_size):
return self.buf[np.random.randint(self.length, size=batch_size)]
replay_size = 200000
replay_memory = ReplayMemory(replay_size)
def sample_memories(batch_size):
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for memory in replay_memory.sample(batch_size):
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], \
cols[4].reshape(-1, 1)
###Output
_____no_output_____
###Markdown
Now let's define the model's policy during training. Just like in `MsPacMan.ipynb`, we will use an $\varepsilon$-greedy policy.
###Code
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 6000000
def epsilon_greedy(q_values, step):
    epsilon = max(eps_min,  # max, not min: epsilon decays toward eps_min but never below it
                  eps_max - ((eps_max - eps_min) * (step / eps_decay_steps)))
if np.random.random() < epsilon:
return np.random.randint(n_outputs)
return np.argmax(q_values)
###Output
_____no_output_____
###Markdown
Now we will train the model to play some Pong. The model will choose an action once every 3 frames. The preprocessing functions defined above will combine those 3 frames into the state the model uses to choose its next action.
###Code
n_steps = 10000000
training_start = 100000
training_interval = 4
save_steps = 1000
copy_steps = 10000
discount_rate = 0.95
skip_start = 20
batch_size = 50
iteration = 0
done = True # To reset the environment at the start.
loss_val = np.infty
game_length = 0
total_max_q = 0.0
mean_max_q = 0.0
checkpoint_path = "./pong_dqn.ckpt"
# Utility function to get the environment state for the model.
def perform_action(action):
preprocess_observations = []
total_reward = 0.0
for i in range(3):
obs, reward, done, info = env.step(action)
total_reward += reward
if done:
for _ in range(i, 3):
preprocess_observations.append(preprocess_observation(obs))
break
else:
preprocess_observations.append(preprocess_observation(obs))
return combine_observations(preprocess_observations).reshape(80, 80, 1), \
total_reward, done
# Main training loop
with tf.Session() as sess:
if os.path.isfile(checkpoint_path + '.index'):
saver.restore(sess, checkpoint_path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print('\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}'
'\tMean Max-Q {:5f} '.format(
iteration, step, n_steps, 100 * step / n_steps, loss_val,
mean_max_q),
end='')
if done:
obs = env.reset()
for _ in range(skip_start):
obs, reward, done, info = env.step(0)
state, reward, done = perform_action(0)
# Evaluate the next action for the agent.
q_values = online_q_values.eval(
feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# The online DQN plays the game.
next_state, reward, done = perform_action(action)
# Save the result in the ReplayMemory.
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
# Compute statistics which help us monitor how training is going.
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
# Only train after the warmup rounds and only every few rounds.
if iteration < training_start or iteration % training_interval != 0:
continue
        # Sample memories from the replay memory.
X_state_val, X_action_val, rewards, X_next_state_val, continues = \
sample_memories(batch_size)
next_q_values = target_q_values.eval(
feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
# Train the online DQN.
_, loss_val = sess.run([training_op, loss], feed_dict={
X_state: X_state_val,
X_action: X_action_val,
y: y_val,
})
# Regularly copy the online DQN to the target DQN.
if step % copy_steps == 0:
copy_online_to_target.run()
# Regularly save the model.
if step and step % save_steps == 0:
saver.save(sess, checkpoint_path)
preprocess_observations = []
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
def dqn_policy(obs, i):
if len(preprocess_observations) < 3:
preprocess_observations.append(preprocess_observation(obs))
if len(preprocess_observations) == 3:
state = combine_observations(preprocess_observations)
q_values = online_q_values.eval(
feed_dict={X_state: [state.reshape(80, 80, 1)]})
dqn_policy.cur_action = np.argmax(q_values)
return dqn_policy.cur_action
preprocess_observations[i % 3] = preprocess_observation(obs)
if i % 3 == 2:
state = combine_observations(preprocess_observations)
q_values = online_q_values.eval(
feed_dict={X_state: [state.reshape(80, 80, 1)]})
dqn_policy.cur_action = np.argmax(q_values)
return dqn_policy.cur_action
dqn_policy.cur_action = 0
html = run_episode(dqn_policy, n_max_steps=10000)
html
###Output
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/saver.py:1276: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.
Instructions for updating:
Use standard file APIs to check for files with this prefix.
INFO:tensorflow:Restoring parameters from /content/gdrive/My Drive/models/pong_dqn.ckpt
###Markdown
**Chapter 16 – Reinforcement Learning** _This notebook contains all the sample code and solutions to the exercises in chapter 16._ Setup This notebook supports both Python 2 and 3. It imports a few common modules, sets up Matplotlib so figures render inside the notebook, and prepares a function to save the generated figures:
###Code
# Support for Python 2 and Python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
import sys
# Seed the pseudo-random number generators for consistent output
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# Matplotlib setup
from IPython.display import HTML
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Korean font for plot labels
plt.rcParams['font.family'] = 'NanumBarunGothic'
plt.rcParams['axes.unicode_minus'] = False
# Folder to save the figures in
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
OpenAI Gym In this notebook we will use [OpenAI Gym](https://gym.openai.com/), a great toolkit for developing and comparing reinforcement learning algorithms. It provides many environments for your learning *agents* to interact with. Let's import `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Next let's load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
_____no_output_____
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image stored as a 3D NumPy array of shape [height, width, channels] (with 3 channels: red, green, and blue). As we will see shortly, other environments may return different objects.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,6))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is identical to the observation (but in many environments it is not):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little utility function to plot an environment:
###Code
def plot_environment(env, figsize=(5,6)):
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent selects an action from an "action space" (the set of possible actions). Here is this environment's action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represent the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower-left for 40 steps:
###Code
env.reset()
for step in range(110):
    env.step(3) # left
for step in range(40):
    env.step(8) # lower-left
###Output
_____no_output_____
###Markdown
Where did we get to?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
The observation describes the visible environment, as we saw earlier. Here it is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is a dictionary that can provide extra information about the internal state of the environment. This is useful for debugging, but the agent should not use this information for learning (that would be cheating, not learning).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), selecting a random direction every 10 steps, and saving each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now let's show the animation:
###Code
def update_scene(num, frames, patch):
    plt.close() # this seems to be a matplotlib bug: two figures get displayed unless the previous one is closed
patch.set_data(frames[num])
return patch,
def plot_animation(frames, figsize=(5,6), repeat=False, interval=40):
fig = plt.figure(figsize=figsize)
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch),
frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
HTML(video.to_html5_video()) # convert the animation to an HTML5 video
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To train our first agent, we will use the simple Cart-Pole environment. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and its angular velocity. To render this environment we first need to fix a rendering issue. Fixing the rendering issue Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even when you specify the `rgb_array` mode. In general you can safely ignore that window. However, if Jupyter is running on a headless server (i.e., without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb and start Jupyter using the `xvfb-run` command: $ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook If Jupyter is running on a headless server but you don't want to worry about Xvfb, then you can just use the following rendering function for the Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
from pyglet.gl import gl_info
    openai_cart_pole_rendering = True   # no problem, let's use OpenAI Gym's rendering function
except Exception:
    openai_cart_pole_rendering = False  # probably no X server available, let's use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
        # use OpenAI Gym's rendering function
return env.render(mode="rgb_array")
else:
        # rendering for the Cart-Pole environment (in case OpenAI Gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
        cart_col = 0x000000 # Blue Green Red
        pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
plt.show()
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
img.shape
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to the right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like it's the same story as before. How can we make the pole remain upright? We will need to define a *policy* for that: the strategy the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard-code a simple policy: if the pole tilts to the left, push the cart left, and vice versa. Let's see if that works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # convert the animation to an HTML5 video
###Output
_____no_output_____
###Markdown
Nope, the system is unstable: after a few wobbles, the pole ends up too tilted and the game is over. We will need a smarter policy! Neural network policies Let's create a neural network that takes observations as inputs and outputs the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of action 0 (left), and the probability of action 1 (right) will be `1 - p`.
###Code
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more hidden neurons
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
###Output
_____no_output_____
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state, then you might need to consider past actions and observations as well, in order to infer that hidden state. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous one in order to estimate the current velocity. Another example is when the observations are noisy: in that case you generally want to use the past few observations to estimate the most likely current state. You may wonder why we pick a random action based on the probabilities given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between *exploring* new actions and *exploiting* the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing, so you randomly pick one. If it turns out to be good, you can increase the probability of ordering it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performed:
###Code
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # convert the animation to an HTML5 video
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole tilts left, and go right if it tilts right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`):
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We can make the same net play in 10 different environments in parallel, and train it for 1,000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
        target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # convert the animation to an HTML5 video
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
INFO:tensorflow:Restoring parameters from ./my_policy_net_basic.ckpt
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects: when you win or lose points in a game, it is not clear which actions contributed to this result. Was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the *credit assignment problem*. The *Policy Gradients* algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
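As a quick worked example of the reward discounting used below: with a discount rate of 0.8, the rewards [10, 0, -50] become [10 + 0.8×0 + 0.8²×(−50), 0 + 0.8×(−50), −50] = [−22, −40, −50], which is exactly what `discount_rewards([10, 0, -50], discount_rate=0.8)` computes in the cell below.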
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
        print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames, figsize=(6,4))
HTML(video.to_html5_video()) # convert the animation to an HTML5 video
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
INFO:tensorflow:Restoring parameters from ./my_policy_net_pg.ckpt
###Markdown
Markov Chains
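A Markov chain is a sequence of random states in which the probability of moving to the next state depends only on the current state: $P(s_{t+1} \mid s_t, s_{t-1}, \dots, s_0) = P(s_{t+1} \mid s_t)$. The `transition_probabilities` matrix below defines such a chain over four states, with state $s_3$ absorbing.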
###Code
transition_probabilities = [
    [0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
    [0.0, 0.0, 0.9, 0.1], # from s1 to ...
    [0.0, 1.0, 0.0, 0.0], # from s2 to ...
    [0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
    print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = np.random.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process
###Code
transition_probabilities = [
    [[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 is chosen then go to s0 with proba 0.7, to s1 with proba 0.3, and so on
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return np.random.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = np.random.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
        print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
        print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
    print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate (or close enough) Q-Value estimates, the optimal policy is simply to choose the action that has the highest Q-Value (i.e., the greedy policy).
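The loop below implements the standard Q-Learning update $Q(s, a) \leftarrow (1 - \alpha)\,Q(s, a) + \alpha\,\bigl(r + \gamma \max_{a'} Q(s', a')\bigr)$, with learning rate $\alpha = 0.01$ and discount rate $\gamma = 0.99$.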
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
q_values[state][actions]=0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
    next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to play MsPacman using the DQN algorithm Creating the MsPacman environment
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
mspacman_color = 210 + 164 + 74
def preprocess_observation(obs):
    img = obs[1:176:2, ::2] # crop and downsize
    img = img.sum(axis=2) # to grayscale
    img[img==mspacman_color] = 0 # improve contrast
    img = (img // 3 - 128).astype(np.int8) # normalize from -128 to 127
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
###Output
_____no_output_____
###Markdown
Note: the `preprocess_observation()` function is slightly different from the one in the book: instead of representing pixels as 64-bit floats from -1.0 to 1.0, it represents them as signed bytes (from -128 to 127). The benefit is that the replay memory will take up roughly 8 times less RAM (about 6.5 GB instead of 52 GB). The reduced precision has no visible impact on training.
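To see where the roughly 8× saving comes from (ignoring the small action, reward, and continue fields): each preprocessed frame has 88×80 = 7,040 pixels, i.e. about 56 KB as float64 versus 7 KB as int8. With two frames (state and next state) per memory and 500,000 memories, that works out to roughly 52 GB versus 6.5 GB.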
###Code
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 grayscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Building the DQN
###Code
reset_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden_in = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n # 9 discrete actions are available
initializer = tf.variance_scaling_initializer()
def q_network(X_state, name):
    prev_layer = X_state / 128.0 # scale pixel intensities to the [-1.0, 1.0] range.
with tf.variable_scope(name) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(
conv_n_maps, conv_kernel_sizes, conv_strides,
conv_paddings, conv_activation):
prev_layer = tf.layers.conv2d(
prev_layer, filters=n_maps, kernel_size=kernel_size,
strides=strides, padding=padding, activation=activation,
kernel_initializer=initializer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in])
hidden = tf.layers.dense(last_conv_layer_flat, n_hidden,
activation=hidden_activation,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width,
input_channels])
online_q_values, online_vars = q_network(X_state, name="q_networks/online")
target_q_values, target_vars = q_network(X_state, name="q_networks/target")
copy_ops = [target_var.assign(online_vars[var_name])
for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
online_vars
learning_rate = 0.001
momentum = 0.95
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keepdims=True)
error = tf.abs(y - q_value)
clipped_error = tf.clip_by_value(error, 0.0, 1.0)
linear_error = 2 * (error - clipped_error)
loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Note: in the first version of the book, the loss function was simply the squared error between the target Q-Values (`y`) and the estimated Q-Values (`q_value`). However, because the experiences are very noisy, it is better to use a quadratic loss only for small errors (below 1.0) and a linear loss (twice the absolute error) for larger errors, which is what the code above computes. This way large errors don't push the model parameters around as much. We also tweaked some hyperparameters (using a smaller learning rate, and using Nesterov Accelerated Gradients rather than Adam optimization, since adaptive gradient algorithms can sometimes perform poorly, according to [this paper](https://arxiv.org/abs/1705.08292)). A few other hyperparameters were tweaked below as well (a larger replay memory, a longer decay for the ε-greedy policy, a larger discount rate, less frequent copies of the online DQN to the target DQN, etc.).
###Code
from collections import deque
replay_memory_size = 500000
replay_memory = deque([], maxlen=replay_memory_size)
def sample_memories(batch_size):
indices = np.random.permutation(len(replay_memory))[:batch_size]
    cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for idx in indices:
memory = replay_memory[idx]
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
###Output
_____no_output_____
###Markdown
Using a `ReplayMemory` class ================== We use a `ReplayMemory` class instead of a `deque` because random access is much faster (thanks to @NileshPS who contributed it). Moreover, sampling with replacement by default is much faster than sampling without replacement from a large replay memory.
###Code
class ReplayMemory:
def __init__(self, maxlen):
self.maxlen = maxlen
self.buf = np.empty(shape=maxlen, dtype=np.object)
self.index = 0
self.length = 0
def append(self, data):
self.buf[self.index] = data
self.length = min(self.length + 1, self.maxlen)
self.index = (self.index + 1) % self.maxlen
def sample(self, batch_size, with_replacement=True):
if with_replacement:
            indices = np.random.randint(self.length, size=batch_size) # faster
else:
indices = np.random.permutation(self.length)[:batch_size]
return self.buf[indices]
replay_memory_size = 500000
replay_memory = ReplayMemory(replay_memory_size)
def sample_memories(batch_size):
    cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for memory in replay_memory.sample(batch_size):
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
###Output
_____no_output_____
###Markdown
=============================================
###Code
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 2000000
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if np.random.rand() < epsilon:
        return np.random.randint(n_outputs) # random action
else:
        return np.argmax(q_values) # optimal action
n_steps = 4000000 # total number of training steps
training_start = 10000 # start training after 10,000 game iterations
training_interval = 4 # run a training step every 4 game iterations
save_steps = 1000 # save the model every 1,000 training steps
copy_steps = 10000 # copy online DQN to target DQN every 10,000 training steps
discount_rate = 0.99
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "./my_dqn.ckpt"
done = True # env needs to be reset
###Output
_____no_output_____
###Markdown
A few variables for tracking progress:
###Code
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
###Output
_____no_output_____
###Markdown
And now the main training loop!
###Code
with tf.Session() as sess:
if os.path.isfile(checkpoint_path + ".index"):
saver.restore(sess, checkpoint_path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
        print("\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}\tMean Max-Q {:5f}   ".format(
            iteration, step, n_steps, step * 100 / n_steps, loss_val, mean_max_q), end="")
        if done: # game over, start again
obs = env.reset()
            for skip in range(skip_start): # skip the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
        # Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
        # Online DQN plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
        # Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
        # Compute statistics for tracking progress (not shown in the book)
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
if iteration < training_start or iteration % training_interval != 0:
            continue # only train after warmup period and at regular intervals
        # Sample memories and use the target DQN to produce the target Q-Value
X_state_val, X_action_val, rewards, X_next_state_val, continues = (
sample_memories(batch_size))
next_q_values = target_q_values.eval(
feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
        # Train the online DQN
_, loss_val = sess.run([training_op, loss], feed_dict={
X_state: X_state_val, X_action: X_action_val, y: y_val})
        # Regularly copy the online DQN to the target DQN
if step % copy_steps == 0:
copy_online_to_target.run()
        # And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
INFO:tensorflow:Restoring parameters from ./my_dqn.ckpt
Iteration 13992 Training step 3999999/4000000 (100.0)% Loss 1.250408 Mean Max-Q 221.025110
###Markdown
You can interrupt the cell above at any time to test your agent using the cell below. If you then run the cell above once again, it will load the last saved parameters and resume training.
###Code
frames = []
n_max_steps = 10000
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
obs = env.reset()
for step in range(n_max_steps):
state = preprocess_observation(obs)
        # Online DQN evaluates what to do
        q_values = online_q_values.eval(feed_dict={X_state: [state]})
        action = np.argmax(q_values)
        # Online DQN plays
obs, reward, done, info = env.step(action)
img = env.render(mode="rgb_array")
frames.append(img)
if done:
break
video = plot_animation(frames, figsize=(5,6))
HTML(video.to_html5_video()) # display the result as an HTML5 video
###Output
_____no_output_____
###Markdown
Extra material Preprocessing for Breakout Here is a preprocessing function you can use to train a DQN for the Breakout-v0 Atari game:
###Code
def preprocess_observation(obs):
    img = obs[34:194:2, ::2] # crop and downsize
return np.mean(img, axis=2).reshape(80, 80) / 255.0
env = gym.make("Breakout-v0")
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
img = preprocess_observation(obs)
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.title("원본 관측 (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("전처리된 관측 (80×80 그레이스케일)")
plt.imshow(img, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, a single image does not give you the direction and speed of the ball, which are crucial pieces of information for playing this game. For this reason, it is best to combine several consecutive observations to represent the environment's state. One way to do that is to create a multi-channel image, with one channel per recent observation. Another is to merge all recent observations into a single-channel image, using `np.max()`. Here, we dim the older images so that the DQN can distinguish the present from the past.
###Code
from collections import deque
def combine_observations_multichannel(preprocessed_observations):
return np.array(preprocessed_observations).transpose([1, 2, 0])
def combine_observations_singlechannel(preprocessed_observations, dim_factor=0.5):
dimmed_observations = [obs * dim_factor**index
for index, obs in enumerate(reversed(preprocessed_observations))]
return np.max(np.array(dimmed_observations), axis=0)
n_observations_per_state = 3
preprocessed_observations = deque([], maxlen=n_observations_per_state)
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
preprocessed_observations.append(preprocess_observation(obs))
img1 = combine_observations_multichannel(preprocessed_observations)
img2 = combine_observations_singlechannel(preprocessed_observations)
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.title("멀티 채널 상태")
plt.imshow(img1, interpolation="nearest")
plt.axis("off")
plt.subplot(122)
plt.title("싱글 채널 상태")
plt.imshow(img2, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Exercise solutions 1. to 7. See Appendix A. 8. BipedalWalker-v2 *Exercise: Use policy gradients to train an agent on OpenAI gym's `BipedalWalker-v2` environment.*
###Code
import gym
env = gym.make("BipedalWalker-v2")
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
###Markdown
Note: if you get an issue like "`module 'Box2D._Box2D' has no attribute 'RAND_LIMIT'`" when creating the `BipedalWalker-v2` environment, try the following:```$ pip uninstall Box2D-kengz$ pip install git+https://github.com/pybox2d/pybox2d```
###Code
obs = env.reset()
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
obs
###Output
_____no_output_____
###Markdown
See the [online documentation](https://github.com/openai/gym/wiki/BipedalWalker-v2) for the meaning of each of these 24 numbers.
###Code
env.action_space
env.action_space.low
env.action_space.high
###Output
_____no_output_____
###Markdown
This is a continuous 4D action space (from -1 to 1) that controls the torque of each leg's hip joint and ankle joint. One way to deal with a continuous action space is to discretize it. For example, we can limit the possible torque values to the 3 values -1.0, 0.0, and 1.0. This gives us $3^4=81$ possible actions.
###Code
from itertools import product
possible_torques = np.array([-1.0, 0.0, 1.0])
possible_actions = np.array(list(product(possible_torques, possible_torques, possible_torques, possible_torques)))
possible_actions.shape
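# expected: (81, 4), i.e. 81 discrete actions, each a vector of 4 torques (added note)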
tf.reset_default_graph()
# 1. Specify the network architecture
n_inputs = env.observation_space.shape[0] # == 24
n_hidden = 10
n_outputs = len(possible_actions) # == 81
initializer = tf.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.selu,
kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
outputs = tf.nn.softmax(logits)
# 3. Select a random action based on the estimated probabilities
action_index = tf.squeeze(tf.multinomial(logits, num_samples=1), axis=-1)
# 4. Training
learning_rate = 0.01
y = tf.one_hot(action_index, depth=len(possible_actions))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Let's run this policy network, even though it has not been trained yet.
###Code
def run_bipedal_walker(model_path=None, n_max_steps = 1000):
env = gym.make("BipedalWalker-v2")
frames = []
with tf.Session() as sess:
if model_path is None:
init.run()
else:
saver.restore(sess, model_path)
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
action_index_val = action_index.eval(feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
if done:
break
env.close()
return frames
frames = run_bipedal_walker()
video = plot_animation(frames)
HTML(video.to_html5_video())
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
###Markdown
Nope, it can't walk. So let's train it!
###Code
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 1000
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}/{}".format(iteration + 1, n_iterations), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_index_val, gradients_val = sess.run([action_index, gradients],
feed_dict={X: obs.reshape(1, n_inputs)})
action = possible_actions[action_index_val]
obs, reward, done, info = env.step(action[0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_bipedal_walker_pg.ckpt")
frames = run_bipedal_walker("./my_bipedal_walker_pg.ckpt")
video = plot_animation(frames)
HTML(video.to_html5_video())
###Output
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.
INFO:tensorflow:Restoring parameters from ./my_bipedal_walker_pg.ckpt
###Markdown
**Chapter 16 – Reinforcement Learning** This notebook contains all the sample code and solutions to the exercises in chapter 16. Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
###Code
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
import sys
# to make this notebook's output stable across runs
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# To plot pretty figures and animations
%matplotlib nbagg
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
Note: there may be minor differences between the output of this notebook and the examples shown in the book. You can safely ignore these differences. They are mainly due to the fact that most of the environments provided by OpenAI gym have some randomness. Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Next we will load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
[2017-09-25 11:35:03,438] Making new env: MsPacman-v0
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [width, height, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,4))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little helper function to plot an environment:
###Code
def plot_environment(env, figsize=(5,4)):
plt.close() # or else nbagg sometimes plots in the previous cell
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represents the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower left for 40 steps:
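To see the names behind these integers, Atari environments also expose them (a small addition; `get_action_meanings()` is provided by gym's Atari environments):
###Code
env.unwrapped.get_action_meanings()
###Output
_____no_output_____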
###Code
env.reset()
for step in range(110):
env.step(3) #left
for step in range(40):
env.step(8) #lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), by moving in random directions for 10 steps at a time, recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now show the animation (it's a bit jittery within Jupyter):
###Code
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
plt.close() # or else nbagg sometimes plots in the previous cell
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch), frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To code our first learning agent, we will be using a simpler environment: the Cart-Pole. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment... unfortunately we need to fix an annoying rendering issue first. Fixing the rendering issue Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify the `rgb_array` mode. In general you can safely ignore that window. However, if Jupyter is running on a headless server (i.e. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb. You can start Jupyter using the `xvfb-run` command: $ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook. If Jupyter is running on a headless server but you don't want to worry about Xvfb, then you can just use the following rendering function for the Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
from pyglet.gl import gl_info
openai_cart_pole_rendering = True # no problem, let's use OpenAI gym's rendering function
except Exception:
openai_cart_pole_rendering = False # probably no X server available, let's use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
# use OpenAI gym's rendering function
return env.render(mode="rgb_array")
else:
# rendering for the cart pole environment (in case OpenAI gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
cart_col = 0x000000 # Blue Green Red
pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
plt.show()
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
img.shape
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like it's doing what we're telling it to do. Now how can we make the pole remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! Neural Network Policies Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`. Note: instead of using the `fully_connected()` function from the `tensorflow.contrib.layers` module (as in the book), we now use the `dense()` function from the `tf.layers` module, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same.The main differences relevant to this chapter are:* the `_fn` suffix was removed in all the parameters that had it (for example the `activation_fn` parameter was renamed to `activation`).* the `weights` parameter was renamed to `kernel`,* the default activation is `None` instead of `tf.nn.relu`
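For reference, here is the same layer written both ways (an added side-by-side comparison; the contrib call is how it appeared in the book):
###Code
# Book version (tensorflow.contrib.layers): note activation_fn and weights_initializer
# hidden = tf.contrib.layers.fully_connected(X, n_hidden, activation_fn=tf.nn.elu,
#                                            weights_initializer=initializer)
# Notebook version (official tf.layers API): activation and kernel_initializer
# hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
#                          kernel_initializer=initializer)
###Output
_____no_output_____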
###Code
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more than this
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.contrib.layers.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
###Output
_____no_output_____
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performed:
###Code
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`):
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames)
plt.show()
###Output
[2017-09-25 11:37:30,225] Making new env: CartPole-v0
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_.The _Policy Gradients_ algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
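Stated as an equation (an added restatement of the standard REINFORCE estimate, which is what the code below implements): $\nabla_\theta J(\theta) \approx \frac{1}{m} \sum_{i=1}^{m} \sum_t R_t^{(i)} \, \nabla_\theta \log \pi_\theta \big( a_t^{(i)} \mid s_t^{(i)} \big)$, where $R_t^{(i)}$ is the discounted and normalized return following step $t$ of episode $i$; this is exactly the `reward * gradient` averaging performed in the training loop below.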
###Code
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
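# expected: array([-22., -40., -50.]); e.g. 10 + 0.8 * (0 + 0.8 * (-50)) = -22 (added note)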
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames)
plt.show()
###Output
[2017-09-25 11:51:05,425] Making new env: CartPole-v0
###Markdown
Markov Chains
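Each row of the transition matrix below gives the probabilities of jumping from one state to each of the four states, so every row must sum to 1. A quick check (a minimal added sketch, not from the book):
###Code
import numpy as np

P = np.array([[0.7, 0.2, 0.0, 0.1],
              [0.0, 0.0, 0.9, 0.1],
              [0.0, 1.0, 0.0, 0.0],
              [0.0, 0.0, 0.0, 1.0]])
assert np.allclose(P.sum(axis=1), 1.0)  # a valid (right-)stochastic matrix
###Output
_____no_output_____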
###Code
transition_probabilities = [
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to ...
[0.0, 1.0, 0.0, 0.0], # from s2 to ...
[0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = np.random.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process
###Code
transition_probabilities = [
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return np.random.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = np.random.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate Q-Value estimates (or close enough), then the optimal policy consists in choosing the action that has the highest Q-Value (i.e., the greedy policy).
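In update form (added for clarity; this is exactly what the code below computes): $Q(s, a) \leftarrow (1 - \alpha) \, Q(s, a) + \alpha \, \big( r + \gamma \cdot \underset{a'}{\max} \, Q(s', a') \big)$, where $\alpha$ is the learning rate and $\gamma$ the discount rate.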
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
q_values[state][actions]=0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to Play MsPacman Using the DQN Algorithm **Warning**: Unfortunately, the first version of the book contained two important errors in this section. 1. The actor DQN and critic DQN should have been named _online DQN_ and _target DQN_ respectively. Actor-critic algorithms are a distinct class of algorithms. 2. The online DQN is the one that learns and is copied to the target DQN at regular intervals. The target DQN's only role is to estimate the next state's Q-Values for each possible action. This is needed to compute the target Q-Values for training the online DQN, as shown in this equation:$y(s,a) = r + \gamma \cdot \underset{a'}{\max} \, Q_\text{target}(s', a')$* $y(s,a)$ is the target Q-Value to train the online DQN for the state-action pair $(s, a)$.* $r$ is the reward actually collected after playing action $a$ in state $s$.* $\gamma$ is the discount rate.* $s'$ is the state actually reached after playing action $a$ in state $s$.* $a'$ is one of the possible actions in state $s'$.* $Q_\text{target}(s', a')$ is the target DQN's estimate of the Q-Value of playing action $a'$ while in state $s'$. I hope these errors did not affect you, and if they did, I sincerely apologize. Creating the MsPacman environment
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
mspacman_color = np.array([210, 164, 74]).mean()
def preprocess_observation(obs):
img = obs[1:176:2, ::2] # crop and downsize
img = img.mean(axis=2) # to greyscale
img[img==mspacman_color] = 0 # Improve contrast
img = (img - 128) / 128 - 1 # normalize from -1. to 1.
return img.reshape(88, 80, 1)
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Build DQN Note: instead of using `tf.contrib.layers.convolution2d()` or `tf.contrib.layers.conv2d()` (as in the first version of the book), we now use the `tf.layers.conv2d()`, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same, except that the parameter names have changed slightly:* the `num_outputs` parameter was renamed to `filters`,* the `stride` parameter was renamed to `strides`,* the `_fn` suffix was removed from parameter names that had it (e.g., `activation_fn` was renamed to `activation`),* the `weights_initializer` parameter was renamed to `kernel_initializer`,* the weights variable was renamed to `"kernel"` (instead of `"weights"`), and the biases variable was renamed from `"biases"` to `"bias"`,* and the default `activation` is now `None` instead of `tf.nn.relu`.
###Code
reset_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"] * 3
conv_activation = [tf.nn.relu] * 3
n_hidden_in = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
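# (added note) with "SAME" padding: 88 -> 88/4 = 22 -> 22/2 = 11 -> 11/1 = 11 rows; 80 -> 20 -> 10 -> 10 cols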
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n # 9 discrete actions are available
initializer = tf.contrib.layers.variance_scaling_initializer()
def q_network(X_state, name):
prev_layer = X_state
with tf.variable_scope(name) as scope:
for n_maps, kernel_size, strides, padding, activation in zip(
conv_n_maps, conv_kernel_sizes, conv_strides,
conv_paddings, conv_activation):
prev_layer = tf.layers.conv2d(
prev_layer, filters=n_maps, kernel_size=kernel_size,
strides=strides, padding=padding, activation=activation,
kernel_initializer=initializer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_in])
hidden = tf.layers.dense(last_conv_layer_flat, n_hidden,
activation=hidden_activation,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs,
kernel_initializer=initializer)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width,
input_channels])
online_q_values, online_vars = q_network(X_state, name="q_networks/online")
target_q_values, target_vars = q_network(X_state, name="q_networks/target")
copy_ops = [target_var.assign(online_vars[var_name])
for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
online_vars
learning_rate = 0.001
momentum = 0.95
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keep_dims=True)
error = tf.abs(y - q_value)
clipped_error = tf.clip_by_value(error, 0.0, 1.0)
linear_error = 2 * (error - clipped_error)
loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
Note: in the first version of the book, the loss function was simply the squared error between the target Q-Values (`y`) and the estimated Q-Values (`q_value`). However, because the experiences are very noisy, it is better to use a quadratic loss only for small errors (below 1.0) and a linear loss (twice the absolute error) for larger errors, which is what the code above computes. This way large errors don't push the model parameters around as much. Note that we also tweaked some hyperparameters (using a smaller learning rate, and using Nesterov Accelerated Gradients rather than Adam optimization, since adaptive gradient algorithms may sometimes be bad, according to this [paper](https://arxiv.org/abs/1705.08292)). We also tweaked a few other hyperparameters below (a larger replay memory, longer decay for the $\epsilon$-greedy policy, larger discount rate, less frequent copies of the online DQN to the target DQN, etc.).
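As a quick sanity check, here is a minimal NumPy sketch (an addition, not from the book) of that loss: it is quadratic for errors up to 1.0 and linear (slope 2) beyond, and the two pieces join smoothly at 1.0.
###Code
import numpy as np

def quadratic_linear_loss(error):
    abs_error = np.abs(error)
    clipped = np.clip(abs_error, 0.0, 1.0)           # quadratic part is capped at 1.0
    return clipped ** 2 + 2 * (abs_error - clipped)  # plus a linear part beyond 1.0

quadratic_linear_loss(np.array([0.5, 1.0, 3.0]))  # -> array([0.25, 1., 5.])
###Output
_____no_output_____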
###Code
from collections import deque
replay_memory_size = 20000
replay_memory = deque([], maxlen=replay_memory_size)
def sample_memories(batch_size):
indices = np.random.permutation(len(replay_memory))[:batch_size]
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for idx in indices:
memory = replay_memory[idx]
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 2000000
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if np.random.rand() < epsilon:
return np.random.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
n_steps = 4000000 # total number of training steps
training_start = 10000 # start training after 10,000 game iterations
training_interval = 4 # run a training step every 4 game iterations
save_steps = 1000 # save the model every 1,000 training steps
copy_steps = 10000 # copy online DQN to target DQN every 10,000 training steps
discount_rate = 0.99
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "./my_dqn.ckpt"
done = True # env needs to be reset
###Output
_____no_output_____
###Markdown
A few variables for tracking progress:
###Code
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
###Output
_____no_output_____
###Markdown
And now the main training loop!
###Code
with tf.Session() as sess:
if os.path.isfile(checkpoint_path + ".index"):
saver.restore(sess, checkpoint_path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print("\rIteration {}\tTraining step {}/{} ({:.1f})%\tLoss {:5f}\tMean Max-Q {:5f} ".format(
iteration, step, n_steps, step * 100 / n_steps, loss_val, mean_max_q), end="")
if done: # game over, start again
obs = env.reset()
for skip in range(skip_start): # skip the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# Online DQN plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
# Compute statistics for tracking progress (not shown in the book)
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
if iteration < training_start or iteration % training_interval != 0:
continue # only train after warmup period and at regular intervals
# Sample memories and use the target DQN to produce the target Q-Value
X_state_val, X_action_val, rewards, X_next_state_val, continues = (
sample_memories(batch_size))
next_q_values = target_q_values.eval(
feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
# Train the online DQN
_, loss_val = sess.run([training_op, loss], feed_dict={
X_state: X_state_val, X_action: X_action_val, y: y_val})
# Regularly copy the online DQN to the target DQN
if step % copy_steps == 0:
copy_online_to_target.run()
# And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
INFO:tensorflow:Restoring parameters from ./my_dqn.ckpt
###Markdown
You can interrupt the cell above at any time to test your agent using the cell below. You can then run the cell above once again, it will load the last parameters saved and resume training.
###Code
frames = []
n_max_steps = 10000
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
obs = env.reset()
for step in range(n_max_steps):
state = preprocess_observation(obs)
# Online DQN evaluates what to do
q_values = online_q_values.eval(feed_dict={X_state: [state]})
action = np.argmax(q_values)
# Online DQN plays
obs, reward, done, info = env.step(action)
img = env.render(mode="rgb_array")
frames.append(img)
if done:
break
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Extra material Preprocessing for Breakout Here is a preprocessing function you can use to train a DQN for the Breakout-v0 Atari game:
###Code
def preprocess_observation(obs):
img = obs[34:194:2, ::2] # crop and downsize
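    # (added note) 34:194:2 keeps (194-34)//2 = 80 rows; ::2 keeps 160//2 = 80 columns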
return np.mean(img, axis=2).reshape(80, 80) / 255.0
env = gym.make("Breakout-v0")
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (80×80 grayscale)")
plt.imshow(img, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, a single image does not give you the direction and speed of the ball, which are crucial pieces of information for playing this game. For this reason, it is best to actually combine several consecutive observations to create the environment's state representation. One way to do that is to create a multi-channel image, with one channel per recent observation. Another is to merge all recent observations into a single-channel image, using `np.max()`. In this case, we need to dim the older images so that the DQN can distinguish the past from the present.
###Code
def combine_observations_multichannel(preprocessed_observations):
return np.array(preprocessed_observations).transpose([1, 2, 0])
def combine_observations_singlechannel(preprocessed_observations, dim_factor=0.5):
dimmed_observations = [obs * dim_factor**index
for index, obs in enumerate(reversed(preprocessed_observations))]
return np.max(np.array(dimmed_observations), axis=0)
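# (added note) reversed() puts the newest observation at index 0, so it keeps
# full brightness (dim_factor ** 0 == 1.0) while older observations fade out.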
n_observations_per_state = 3
preprocessed_observations = deque([], maxlen=n_observations_per_state)
obs = env.reset()
for step in range(10):
obs, _, _, _ = env.step(1)
preprocessed_observations.append(preprocess_observation(obs))
img1 = combine_observations_multichannel(preprocessed_observations)
img2 = combine_observations_singlechannel(preprocessed_observations)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Multichannel state")
plt.imshow(img1, interpolation="nearest")
plt.axis("off")
plt.subplot(122)
plt.title("Singlechannel state")
plt.imshow(img2, interpolation="nearest", cmap="gray")
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
**Chapter 16 – Reinforcement Learning** This notebook contains all the sample code and solutions to the exercises in chapter 16. Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
###Code
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import numpy.random as rnd
import os
# to make this notebook's output stable across runs
rnd.seed(42)
# To plot pretty figures and animations
%matplotlib nbagg
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
###Output
_____no_output_____
###Markdown
Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Next we will load the MsPacman environment, version 0.
###Code
env = gym.make('MsPacman-v0')
###Output
INFO:gym.envs.registration:Making new env: MsPacman-v0
[2016-10-23 14:19:54,435] Making new env: MsPacman-v0
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [width, height, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later.
###Code
obs.shape
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
###Output
_____no_output_____
###Markdown
Let's plot this image:
###Code
plt.figure(figsize=(5,4))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
###Output
_____no_output_____
###Markdown
Welcome back to the 1980s! :) In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
###Code
(img == obs).all()
###Output
_____no_output_____
###Markdown
Let's create a little helper function to plot an environment:
###Code
def plot_environment(env, figsize=(5,4)):
plt.close() # or else nbagg sometimes plots in the previous cell
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
plt.show()
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
`Discrete(9)` means that the possible actions are integers 0 through 8, which represents the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left). Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower left for 40 steps:
###Code
env.reset()
for step in range(110):
env.step(3) #left
for step in range(40):
env.step(8) #lower-left
###Output
_____no_output_____
###Markdown
Where are we now?
###Code
plot_environment(env)
###Output
_____no_output_____
###Markdown
The `step()` function actually returns several important objects:
###Code
obs, reward, done, info = env.step(0)
###Output
_____no_output_____
###Markdown
The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image:
###Code
obs.shape
###Output
_____no_output_____
###Markdown
The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
###Code
info
###Output
_____no_output_____
###Markdown
Let's play one full game (with 3 lives), by moving in random directions for 10 steps at a time, recording each frame:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = env.render(mode="rgb_array")
frames.append(img)
if step % n_change_steps == 0:
action = env.action_space.sample() # play randomly
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now show the animation (it's a bit jittery within Jupyter):
###Code
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
plt.close() # or else nbagg sometimes plots in the previous cell
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch), frames=len(frames), repeat=repeat, interval=interval)
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Once you have finished playing with an environment, you should close it to free up resources:
###Code
env.close()
###Output
_____no_output_____
###Markdown
To code our first learning agent, we will be using a simpler environment: the Cart-Pole. A simple environment: the Cart-Pole The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make("CartPole-v0")
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment... unfortunately we need to fix an annoying rendering issue first. Fixing the rendering issue Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify the `rgb_array` mode. In general you can safely ignore that window. However, if Jupyter is running on a headless server (i.e. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb. You can start Jupyter using the `xvfb-run` command: `$ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook`. If you are running this notebook using Binder, then this has been taken care of for you. If not, and you don't want to worry about Xvfb, then you can just use the following rendering function for the Cart-Pole:
###Code
from PIL import Image, ImageDraw
try:
from pyglet.gl import gl_info
openai_cart_pole_rendering = True # no problem, let's use OpenAI gym's rendering function
except Exception:
openai_cart_pole_rendering = False # probably no X server available, let's use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
# use OpenAI gym's rendering function
return env.render(mode="rgb_array")
else:
# rendering for the cart pole environment (in case OpenAI gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
cart_col = 0x000000 # Blue Green Red
pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
plt.show()
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Now let's look at the action space:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
###Output
_____no_output_____
###Markdown
Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to the right instead:
###Code
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
###Output
_____no_output_____
###Markdown
Looks like it's doing what we're telling it to do. Now how can we make the pole remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:
###Code
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! Neural Network Policies Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`.
###Code
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more than this
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.contrib.layers.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu,
weights_initializer=initializer)
outputs = fully_connected(hidden, n_outputs, activation_fn=tf.nn.sigmoid,
weights_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(concat_dim=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.initialize_all_variables()
###Output
_____no_output_____
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's randomly initialize this policy neural network and use it to play one game:
###Code
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performed:
###Code
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities `y`, and the training operations (`cross_entropy`, `optimizer` and `training_op`):
###Code
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
tf.reset_default_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)
logits = fully_connected(hidden, n_outputs, activation_fn=None)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(concat_dim=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits, y)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.initialize_all_variables()
saver = tf.train.Saver()
###Output
_____no_output_____
###Markdown
We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done.
###Code
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
frames = []
env = gym.make("CartPole-v0")
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, model_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
return frames
frames = render_policy_net("my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames)
plt.show()
###Output
INFO:gym.envs.registration:Making new env: CartPole-v0
[2016-10-23 14:21:09,941] Making new env: CartPole-v0
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_.The _Policy Gradients_ algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
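In the implementation below this takes the form of the classic REINFORCE estimator: each step's cross-entropy gradient is weighted by the discounted, normalized return that followed it, roughly $\nabla_\theta J(\theta) \approx \frac{1}{m}\sum_i R_i \, \nabla_\theta \log \pi_\theta(a_i|s_i)$, so actions followed by high returns are made more likely and actions followed by low returns less likely.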
###Code
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
tf.reset_default_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)
logits = fully_connected(hidden, n_outputs, activation_fn=None)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(concat_dim=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits, y)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.initialize_all_variables()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
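# worked example: discount_rewards([10, 0, -50], discount_rate=0.8)
#   step 2: -50
#   step 1:   0 + 0.8 * (-50) = -40
#   step 0:  10 + 0.8 * (-40) = -22
# -> array([-22., -40., -50.])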
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\r{}\tTotal rewards: ".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
print(np.sum(current_rewards), end=" ")
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames)
plt.show()
###Output
INFO:gym.envs.registration:Making new env: CartPole-v0
[2016-10-23 14:32:17,325] Making new env: CartPole-v0
###Markdown
Markov Chains A Markov chain is a stochastic process in which the probability of the next state depends only on the current state. Below we define the transition probabilities of a simple 4-state chain (state s3 is terminal) and sample a few state sequences:
###Code
transition_probabilities = [
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to ...
[0.0, 1.0, 0.0, 0.0], # from s2 to ...
[0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
current_state = start_state
print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = rnd.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process A Markov decision process extends a Markov chain with actions and rewards: in each state the agent chooses one of the possible actions, and that choice determines both the transition probabilities and the reward received.
###Code
transition_probabilities = [
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
return [0, 2, 1][state]
def policy_random(state):
return rnd.choice(possible_actions[state])
def policy_safe(state):
return [0, 0, 1][state]
class MDPEnvironment(object):
def __init__(self, start_state=0):
self.start_state=start_state
self.reset()
def reset(self):
self.total_rewards = 0
self.state = self.start_state
def step(self, action):
next_state = rnd.choice(range(3), p=transition_probabilities[self.state][action])
reward = rewards[self.state][action][next_state]
self.state = next_state
self.total_rewards += reward
return self.state, reward
def run_episode(policy, n_steps, start_state=0, display=True):
env = MDPEnvironment()
if display:
print("States (+rewards):", end=" ")
for step in range(n_steps):
if display:
if step == 10:
print("...", end=" ")
elif step < 10:
print(env.state, end=" ")
action = policy(env.state)
state, reward = env.step(action)
if display and step < 10:
if reward:
print("({})".format(reward), end=" ")
if display:
print("Total rewards =", env.total_rewards)
return env.total_rewards
for policy in (policy_fire, policy_random, policy_safe):
all_totals = []
print(policy.__name__)
for episode in range(1000):
all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
policy_fire
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 2 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = 210
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 2 (40) 0 (10) ... Total rewards = 70
States (+rewards): 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 70
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 ... Total rewards = -10
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) ... Total rewards = 290
Summary: mean=121.1, std=129.333766, min=-330, max=470
policy_random
States (+rewards): 0 1 (-50) 2 1 (-50) 2 (40) 0 1 (-50) 2 2 (40) 0 ... Total rewards = -60
States (+rewards): 0 (10) 0 0 0 0 0 (10) 0 0 0 (10) 0 ... Total rewards = -30
States (+rewards): 0 1 1 (-50) 2 (40) 0 0 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 (10) 0 (10) 0 0 0 0 1 (-50) 2 (40) 0 0 ... Total rewards = 0
States (+rewards): 0 0 (10) 0 1 (-50) 2 (40) 0 0 0 0 (10) 0 (10) ... Total rewards = 40
Summary: mean=-22.1, std=88.152740, min=-380, max=200
policy_safe
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 1 1 1 1 1 ... Total rewards = 30
States (+rewards): 0 (10) 0 1 1 1 1 1 1 1 1 ... Total rewards = 10
States (+rewards): 0 1 1 1 1 1 1 1 1 1 ... Total rewards = 0
Summary: mean=22.3, std=26.244312, min=0, max=170
###Markdown
Q-Learning Q-Learning will learn the optimal policy by watching the random policy play.
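At every observed transition $(s, a, r, s')$ it applies the update $Q(s,a) \leftarrow (1-\alpha)\,Q(s,a) + \alpha\,\big(r + \gamma \max_{a'} Q(s',a')\big)$, with learning rate $\alpha$ and discount rate $\gamma$; this is exactly the update line inside the loop below.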
###Code
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01
gamma = 0.99
exploration_policy = policy_random
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
q_values[state][actions]=0
env = MDPEnvironment()
for step in range(n_steps):
action = exploration_policy(env.state)
state = env.state
next_state, reward = env.step(action)
next_value = np.max(q_values[next_state]) # greedy policy
q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
return np.argmax(q_values[state])
q_values
all_totals = []
for episode in range(1000):
all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
print("Summary: mean={:.1f}, std={:1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
###Output
States (+rewards): 0 (10) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) ... Total rewards = 230
States (+rewards): 0 (10) 0 (10) 0 (10) 0 1 (-50) 2 2 1 (-50) 2 (40) 0 (10) ... Total rewards = 90
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 170
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) 0 (10) ... Total rewards = 220
States (+rewards): 0 1 (-50) 2 (40) 0 (10) 0 1 (-50) 2 (40) 0 (10) 0 (10) 0 (10) ... Total rewards = -50
Summary: mean=125.6, std=127.363464, min=-290, max=500
###Markdown
Learning to play MsPacman using Deep Q-Learning
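As in tabular Q-Learning, the critic network is trained toward the target $y = r + c \cdot \gamma \max_{a'} Q(s',a')$, where $c$ is 1.0 while the game continues and 0.0 when it ends (the `1.0 - done` value stored in replay memory below).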
###Code
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
###Output
_____no_output_____
###Markdown
Preprocessing Preprocessing the images is optional but greatly speeds up training.
###Code
mspacman_color = np.array([210, 164, 74]).mean()
def preprocess_observation(obs):
img = obs[1:176:2, ::2] # crop and downsize
img = img.mean(axis=2) # to greyscale
img[img==mspacman_color] = 0 # Improve contrast
    img = (img - 128) / 128 # normalize from -1. to 1.
return img.reshape(88, 80, 1)
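# shape bookkeeping: slicing rows 1:176 with stride 2 keeps 88 rows, and every
# other column (::2) keeps 80 columns, hence the 88x80x1 input to the network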
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
###Output
_____no_output_____
###Markdown
Build DQN
###Code
tf.reset_default_graph()
from tensorflow.contrib.layers import convolution2d, fully_connected
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"]*3
conv_activation = [tf.nn.relu]*3
n_hidden_inputs = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n
initializer = tf.contrib.layers.variance_scaling_initializer()
learning_rate = 0.01
def q_network(X_state, scope):
prev_layer = X_state
conv_layers = []
with tf.variable_scope(scope) as scope:
for n_maps, kernel_size, stride, padding, activation in zip(conv_n_maps, conv_kernel_sizes, conv_strides, conv_paddings, conv_activation):
prev_layer = convolution2d(prev_layer, num_outputs=n_maps, kernel_size=kernel_size, stride=stride, padding=padding, activation_fn=activation, weights_initializer=initializer)
conv_layers.append(prev_layer)
last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_inputs])
hidden = fully_connected(last_conv_layer_flat, n_hidden, activation_fn=hidden_activation, weights_initializer=initializer)
outputs = fully_connected(hidden, n_outputs, activation_fn=None)
trainable_vars = {var.name[len(scope.name):]: var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name)}
return outputs, trainable_vars
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width, input_channels])
actor_q_values, actor_vars = q_network(X_state, scope="q_networks/actor") # acts
critic_q_values, critic_vars = q_network(X_state, scope="q_networks/critic") # learns
copy_ops = [actor_var.assign(critic_vars[var_name])
for var_name, actor_var in actor_vars.items()]
copy_critic_to_actor = tf.group(*copy_ops)
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(critic_q_values * tf.one_hot(X_action, n_outputs),
reduction_indices=1, keep_dims=True)
cost = tf.reduce_mean(tf.square(y - q_value))
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cost, global_step=global_step)
init = tf.initialize_all_variables()
saver = tf.train.Saver()
actor_vars
from collections import deque
replay_memory_size = 10000
replay_memory = deque([], maxlen=replay_memory_size)
def sample_memories(batch_size):
indices = rnd.permutation(len(replay_memory))[:batch_size]
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for idx in indices:
memory = replay_memory[idx]
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1)
eps_min = 0.05
eps_max = 1.0
eps_decay_steps = 50000
import sys
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
print(" epsilon {}".format(epsilon), end="")
sys.stdout.flush()
if rnd.rand() < epsilon:
return rnd.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
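# epsilon decays linearly from eps_max (1.0) down to eps_min (0.05) over the
# first eps_decay_steps (50,000) training steps, then stays at eps_min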
n_steps = 100000 # total number of training steps
training_start = 1000 # start training after 1,000 game iterations
training_interval = 3 # run a training step every 3 game iterations
save_steps = 50 # save the model every 50 training steps
copy_steps = 25 # copy the critic to the actor every 25 training steps
discount_rate = 0.95
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "my_dqn.ckpt"
done = True # env needs to be reset
with tf.Session() as sess:
if os.path.isfile(checkpoint_path):
saver.restore(sess, checkpoint_path)
else:
init.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print("\rIteration {}\tTraining step {}/{} ({:.1f}%)".format(iteration, step, n_steps, step * 100 / n_steps), end="")
if done: # game over, start again
obs = env.reset()
for skip in range(skip_start): # skip boring game iterations at the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
# Actor evaluates what to do
q_values = actor_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# Actor plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
if iteration < training_start or iteration % training_interval != 0:
continue
# Critic learns
X_state_val, X_action_val, rewards, X_next_state_val, continues = sample_memories(batch_size)
next_q_values = actor_q_values.eval(feed_dict={X_state: X_next_state_val})
y_val = rewards + continues * discount_rate * np.max(next_q_values, axis=1, keepdims=True)
training_op.run(feed_dict={X_state: X_state_val, X_action: X_action_val, y: y_val})
# Regularly copy critic to actor
if step % copy_steps == 0:
copy_critic_to_actor.run()
# And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
###Output
Iteration 7695 Training step 3499/3500 (100.0%) epsilon 0.933519
###Markdown
DQN for the Cart-Pole We can reuse the same Deep Q-Learning machinery on the Cart-Pole: the state is simply the raw 4-dimensional observation (no image preprocessing needed), and the actor and critic become tiny fully connected networks.
###Code
eps_min = 0.1
eps_max = 1.0
eps_decay_steps = 20000
import sys
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
print(" epsilon {}".format(epsilon), end="")
sys.stdout.flush()
if rnd.rand() < epsilon:
return rnd.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
tf.reset_default_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 2
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
def q_network(X_state, scope):
with tf.variable_scope(scope) as scope:
hidden = fully_connected(X_state, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)
outputs = fully_connected(hidden, n_outputs, activation_fn=None)
trainable_vars = {var.name[len(scope.name):]: var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name)}
return outputs, trainable_vars
X_state = tf.placeholder(tf.float32, shape=[None, n_inputs])
actor_q_values, actor_vars = q_network(X_state, scope="q_networks/actor") # acts
critic_q_values, critic_vars = q_network(X_state, scope="q_networks/critic") # learns
copy_ops = [actor_var.assign(critic_vars[var_name])
for var_name, actor_var in actor_vars.items()]
copy_critic_to_actor = tf.group(*copy_ops)
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(critic_q_values * tf.one_hot(X_action, n_outputs),
reduction_indices=1, keep_dims=True)
cost = tf.reduce_mean(tf.square(y - q_value))
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cost, global_step=global_step)
init = tf.initialize_all_variables()
saver = tf.train.Saver()
n_steps = 50000 # total number of training steps
training_start = 1000 # start training after 1,000 game iterations
training_interval = 3 # run a training step every 3 game iterations
save_steps = 50 # save the model every 50 training steps
copy_steps = 25 # copy the critic to the actor every 25 training steps
discount_rate = 0.95
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "my_dqn.ckpt"
done = True # env needs to be reset
env = gym.make("CartPole-v0")
replay_memory.clear()
with tf.Session() as sess:
if os.path.isfile(checkpoint_path):
saver.restore(sess, checkpoint_path)
else:
init.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print("\rIteration {}\tTraining step {}/{} ({:.1f}%)".format(iteration, step, n_steps, step * 100 / n_steps), end="")
if done: # game over, start again
obs = env.reset()
state = obs
# Actor evaluates what to do
q_values = actor_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# Actor plays
obs, reward, done, info = env.step(action)
next_state = obs
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
if iteration < training_start or iteration % training_interval != 0:
continue
# Critic learns
X_state_val, X_action_val, rewards, X_next_state_val, continues = sample_memories(batch_size)
next_q_values = actor_q_values.eval(feed_dict={X_state: X_next_state_val})
y_val = rewards + continues * discount_rate * np.max(next_q_values, axis=1, keepdims=True)
training_op.run(feed_dict={X_state: X_state_val, X_action: X_action_val, y: y_val})
# Regularly copy critic to actor
if step % copy_steps == 0:
copy_critic_to_actor.run()
# And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
n_max_steps = 1000
frames = []
obs = env.reset()
with tf.Session() as sess:
saver.restore(sess, checkpoint_path)
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
actor_q_values_val = actor_q_values.eval(feed_dict={X_state: obs.reshape(1, n_inputs)})
action_val = np.argmax(actor_q_values_val)
obs, reward, done, info = env.step(action_val)
if done:
break
len(frames)
video = plot_animation(frames)
plt.show()
###Output
_____no_output_____ |
distribution/bernoulli.ipynb | ###Markdown
* $Y \sim \mathrm{Ber}(p)$: $Y$ is distributed Bernoulli with success parameter $p$, $p \in (0,1)$, and we have $P(Y=1)=p=1-P(Y=0)$.
* To verify that the sampler below is correct: $P(Y=1)=P(u<p)=p$, where $u \sim \mathrm{Uniform}(0,1)$ is the draw from `np.random.rand`.
###Code
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt

def bernoulli(p, n=1):
return np.random.rand(n) < p
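# sanity check (illustrative): for large n the sample mean should approach p
print(bernoulli(0.2, 100000).mean())  # ~0.2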
y = Counter(bernoulli(0.2, 1000))
y.values()
plt.bar(np.arange(len(y)), list(map(int, y.values())))
plt.xticks(np.arange(len(y)), y.keys())
plt.show()
###Output
_____no_output_____ |
02_Demos/GeostatsPy_multivariate.ipynb | ###Markdown
GeostatsPy: Multivariate Analysis for Subsurface Data Analytics in Python Michael Pyrcz, Associate Professor, University of Texas at Austin [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) PGE 383 Exercise: Multivariate Analysis for Subsurface Data Analytics in Python Here's a simple workflow demonstrating multivariate analysis for subsurface modeling. This should help you get started with building subsurface models that integrate uncertainty in the sample statistics. Bivariate Analysis Understand and quantify the relationship between two variables:
* example: relationship between porosity and permeability
* how can we use this relationship?

What would be the impact if we ignored this relationship and simply modeled porosity and permeability independently?
* no relationship beyond constraints at data locations
* independent away from data
* nonphysical results, unrealistic uncertainty models

Bivariate Statistics Pearson's Product-Moment Correlation Coefficient:
* provides a measure of the degree of linear relationship
* we refer to it as the 'correlation coefficient'

Let's review the sample variance of variable $x$. Of course, I'm truncating our notation, as $x$ is a set of samples at locations in our modeling space, $x(\bf{u_\alpha}), \, \forall \, \alpha = 0, 1, \dots, n - 1$.

\begin{equation}\sigma^2_{x} = \frac{\sum_{i=1}^{n} (x_i - \overline{x})^2}{(n-1)}\end{equation}

We can expand the squared term and replace one of the factors with $y$, another variable in addition to $x$.

\begin{equation}C_{xy} = \frac{\sum_{i=1}^{n} (x_i - \overline{x})(y_i - \overline{y})}{(n-1)}\end{equation}

We now have a measure that represents the manner in which variables $x$ and $y$ co-vary, or vary together. We can standardize the covariance by the product of the standard deviations of $x$ and $y$ to calculate the correlation coefficient.

\begin{equation}\rho_{xy} = \frac{\sum_{i=1}^{n} (x_i - \overline{x})(y_i - \overline{y})}{(n-1)\sigma_x \sigma_y}, \, -1.0 \le \rho_{xy} \le 1.0\end{equation}

In summary, we can state that the correlation coefficient is related to the covariance as:

\begin{equation}\rho_{xy} = \frac{C_{xy}}{\sigma_x \sigma_y}\end{equation}

Pearson's correlation coefficient is quite sensitive to outliers and departures from linear behavior (in the bivariate sense). We have an alternative known as Spearman's rank correlation coefficient.

\begin{equation}\rho_{R_x R_y} = \frac{\sum_{i=1}^{n} (R_{x_i} - \overline{R_x})(R_{y_i} - \overline{R_y})}{(n-1)\sigma_{R_x} \sigma_{R_y}}, \, -1.0 \le \rho_{R_x R_y} \le 1.0\end{equation}

The rank correlation applies the rank transform to the data prior to calculating the correlation coefficient. To calculate the rank transform, simply replace the data values with their ranks $R_x = 1,\dots,n$, where rank $n$ is assigned to the maximum value and rank $1$ to the minimum value:

\begin{equation}x_\alpha, \, \forall \alpha = 1,\dots, n, \, | \, x_i \ge x_j \, \forall \, i \gt j \end{equation}

\begin{equation}R_{x_i} = i\end{equation}

The correlation coefficients provide useful metrics to quantify relationships between two variables at a time.
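To make these formulas concrete, here is a minimal, self-contained NumPy sketch (illustrative only; the synthetic data and seed are hypothetical, not the project dataset) that computes the covariance, the Pearson correlation and the Spearman rank correlation by hand:
###Code
import numpy as np
from scipy import stats

rng = np.random.RandomState(73073) # arbitrary seed for repeatability
x = rng.rand(100) # synthetic variable x
y = 0.7 * x + 0.3 * rng.rand(100) # y linearly related to x plus noise

n = len(x)
cov_xy = np.sum((x - x.mean()) * (y - y.mean())) / (n - 1) # covariance C_xy
rho_xy = cov_xy / (x.std(ddof=1) * y.std(ddof=1)) # Pearson correlation
rank_rho = np.corrcoef(stats.rankdata(x), stats.rankdata(y))[0, 1] # Spearman: Pearson correlation of the ranks
print(round(cov_xy, 5), round(rho_xy, 3), round(rank_rho, 3))
###Output
_____no_output_____
###Markdown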
We can also consider bivariate scatter plots and matrix scatter plots to visualize multivariate data. In general, current practical subsurface modeling is bivariate, two variables at a time. Multivariate Statistics See the lecture on Multivariate Statistics, including the concepts of joint, conditional and marginal probability. Objective In the PGE 383: Stochastic Subsurface Modeling class I want to provide hands-on experience with building subsurface modeling workflows. Python provides an excellent vehicle to accomplish this. I have coded a package called GeostatsPy with GSLIB: Geostatistical Library (Deutsch and Journel, 1998) functionality that provides basic building blocks for building subsurface modeling workflows. The objective is to remove the hurdles of subsurface modeling workflow construction by providing building blocks and sufficient examples. This is not a coding class per se, but we need the ability to 'script' workflows working with numerical methods. Getting Started Here are the steps to get set up in Python with the GeostatsPy package:
1. Install Anaconda 3 on your machine (https://www.anaconda.com/download/).
2. From Anaconda Navigator (within the Anaconda3 group), go to the environment tab, click on the base (root) green arrow and open a terminal.
3. In the terminal type: pip install geostatspy.
4. Open Jupyter and in the top block get started by copying and pasting the code block below from this Jupyter Notebook to start using the geostatspy functionality.

You will need to copy the data file to your working directory. It is available here:
* Tabular data - sample_data_MV_biased.csv at https://git.io/fhgu0.

There are examples below using these functions. You can go here to see a list of the available functions, https://git.io/fh4eX, along with other example workflows and source code.
###Code
import geostatspy.GSLIB as GSLIB # GSLIB utilities, visualization and wrapper
import geostatspy.geostats as geostats # GSLIB methods converted to Python
###Output
_____no_output_____
###Markdown
We will also need some standard packages. These should have been installed with Anaconda 3.
###Code
import numpy as np # ndarrays for gridded data
import pandas as pd # DataFrames for tabular data
import os # set working directory, run executables
import matplotlib.pyplot as plt # for plotting
from scipy import stats # summary statistics
import math # trig etc.
import scipy.signal as signal # kernel for moving window calculation
import random
import seaborn as sns
###Output
_____no_output_____
###Markdown
Set the working directory I always like to do this so I don't lose files and to simplify subsequent reads and writes (avoiding the full address each time).
###Code
os.chdir("c:/PGE383") # set the working directory
###Output
_____no_output_____
###Markdown
Loading Tabular Data Here's the command to load our comma-delimited data file into a Pandas DataFrame object.
###Code
df = pd.read_csv('sample_data_MV_biased.csv') # load our data table (wrong name!)
###Output
_____no_output_____
###Markdown
Visualizing the DataFrame would be useful and we already learned about these methods in this demo (https://git.io/fNgRW). We can preview the DataFrame by printing a slice or by utilizing the 'head' DataFrame member function (with a nice and clean format, see below). With the slice we could look at any subset of the data table and with the head command, add parameter 'n=13' to see the first 13 rows of the dataset.
###Code
print(df.iloc[0:5,:]) # display the first 5 samples in the table as a preview
df.head(n=13) # we could also use this command for a table preview
###Output
Unnamed: 0 X Y Facies Porosity Perm AI
0 0 100.0 900.0 0.0 0.101319 1.996868 5590.417154
1 1 100.0 800.0 1.0 0.147676 10.711789 3470.845666
2 2 100.0 700.0 1.0 0.145912 17.818143 3586.988513
3 3 100.0 600.0 1.0 0.186167 217.109365 3732.114787
4 4 100.0 500.0 1.0 0.146088 16.717367 2534.551236
###Markdown
Summary Statistics for Tabular Data The table includes X and Y coordinates (meters), Facies 1 and 0 (1 is sandstone and 0 interbedded sand and mudstone), Porosity (fraction), permeability as Perm (mDarcy), and acoustic impedance as AI (m/s x g/cm^3). There are a lot of efficient methods to calculate summary statistics from tabular data in DataFrames. The describe command provides count, mean, minimum, maximum, and quartiles all in a nice data table. We use transpose just to flip the table so that features are on the rows and the statistics are on the columns.
###Code
df.describe().transpose()
###Output
_____no_output_____
###Markdown
Visualizing Tabular Data with Location Maps It is natural to set the x and y coordinate and feature ranges manually. e.g. do you want your color bar to go from 0.05887 to 0.24230 exactly? Also, let's pick a color map for display. I heard that plasma is known to be friendly to the color blind as the color and intensity vary together (hope I got that right, it was an interesting Twitter conversation started by Matt Hall from Agile if I recall correctly). We will assume a study area of 0 to 1,000m in x and y and omit any data outside this area.
###Code
xmin = 0.0; xmax = 1000.0 # range of x values
ymin = 0.0; ymax = 1000.0 # range of y values
pormin = 0.05; pormax = 0.25; # range of porosity values
permmin = 0.01; permmax = 2000.0 # range of permeability values
AImin = 2000.0; AImax = 8000.0 # range of AI values
nx = 100; ny = 100; csize = 10.0
cmap = plt.cm.plasma # color map
###Output
_____no_output_____
###Markdown
Let's try out locmap. This is a reimplementation of GSLIB's locmap program that uses matplotlib. I hope you find it simpler than matplotlib; if you want to get more advanced and build custom plots, look at the source. If you improve it, send me the new code. Now we can populate the plotting parameters and visualize the well data.
###Code
plt.subplot(221)
GSLIB.locmap_st(df,'X','Y','Facies',xmin,xmax,ymin,ymax,0,1,'Well Data - Facies','X(m)','Y(m)','Facies (0-shale, 1-sand)',cmap)
plt.subplot(222)
GSLIB.locmap_st(df,'X','Y','Porosity',xmin,xmax,ymin,ymax,pormin,pormax,'Well Data - Porosity','X(m)','Y(m)','Porosity (fraction)',cmap)
plt.subplot(223)
GSLIB.locmap_st(df,'X','Y','Perm',xmin,xmax,ymin,ymax,permmin,permmax,'Well Data - Permeability','X(m)','Y(m)','Permeability (md)',cmap)
plt.subplot(224)
GSLIB.locmap_st(df,'X','Y','AI',xmin,xmax,ymin,ymax,AImin,AImax,'Well Data - Acoustic Impedance','X(m)','Y(m)','Acoustic Impedance (m/s x g/cm^3)',cmap)
plt.subplots_adjust(left=0.0, bottom=0.0, right=3.0, top=3.2, wspace=0.2, hspace=0.2)
plt.show()
###Output
_____no_output_____
###Markdown
Bivariate Analysis Let's start with some simple bivariate plotting and calculations. First, some scatter plots.
###Code
plt.subplot(121)
plt.plot(df['Porosity'].values,df['Perm'].values, 'o', label='', markerfacecolor='red', markeredgecolor='black', alpha=0.2)
plt.title('Well Data Permeability vs. Porosity')
plt.xlabel('Porosity (fraction)')
plt.ylabel('Permeability (mD)')
#plt.legend()
plt.subplot(122)
plt.plot(df['AI'].values,df['Porosity'].values, 'o', label='', markerfacecolor='red', markeredgecolor='black', alpha=0.2)
plt.title('Well Data Porosity vs. Acoustic Impedance')
plt.ylabel('Porosity (fraction)')
plt.xlabel('Acoustic Impedance (m/s x g/cm^3)')
plt.subplots_adjust(left=0.0, bottom=0.0, right=2.2, top=1.2, wspace=0.2, hspace=0.2)
plt.show()
###Output
_____no_output_____
###Markdown
Correlation and Covariance It is straightforward to calculate the covariance and correlation from the pairs of data in our dataset. Here's the covariance. Notice that the matrix is symmetric? Makes sense, as $C_{Por,Perm} = C_{Perm,Por}$. Also, note that the diagonal values ($C_{i,j}$ where $i=j$) are equal to the variances. We check this for porosity by calculating the variance. (The two values differ slightly because Pandas' `cov` uses the sample variance, dividing by $n-1$, while `np.var` defaults to the population variance, dividing by $n$.)
###Code
print(df.iloc[:,3:7].cov()) # the covariance matrix for columns 3,4,5 and 6 and all rows
print('The variance of porosity is ' + str(round(np.var(df['Porosity'].values),6)))
###Output
Facies Porosity Perm AI
Facies 0.241085 0.011370 33.010926 -248.032706
Porosity 0.011370 0.000939 4.055029 -17.132244
Perm 33.010926 4.055029 52149.501968 -46471.695092
AI -248.032706 -17.132244 -46471.695092 949768.302409
The variance of porosity is 0.000936
###Markdown
Here's the correlation coefficient.
###Code
df.iloc[:,3:7].corr()
###Output
_____no_output_____
###Markdown
Matrix Scatter Plots If we have 3 or more variables to consider, then matrix scatter plots offer an efficient method to display the multivariate relationships, 2 variables at a time. One can identify:
1. the range, envelope of the paired data
2. homoscedastic and heteroscedastic behaviors
3. non-linear features

Here's the seaborn package matrix scatter plot function, pairplot. Let's color the results by facies.
###Code
sns.pairplot(df, hue='Facies',vars=['Facies','Porosity','Perm','AI'],markers='o')
###Output
_____no_output_____
###Markdown
Joint, Conditional and Marginals We can use kernel density estimation to estimate the joint probability density function (pdf) for the paired data, a 2D pdf! We could use this to estimate any required joint, marginal and conditional probability (care must be taken with normalization). Let's use the seaborn package 'kdeplot' function to estimate the joint pdf for porosity and acoustic impedance.
###Code
ax = sns.kdeplot(df['AI'].values,df['Porosity'].values, shade=True, n_levels = 10,cmap=cmap,cbar= True, shade_lowest = False)
ax.set_xlabel('Acoustic Impedance (m/s x g/cm^3)'); ax.set_ylabel('Porosity (fraction)'); ax.set_title('Porosity vs. Acoustic Impedance')
###Output
_____no_output_____
###Markdown
I think it is useful to visualize the joint pdfs with the marginal pdfs on a single plot. We can use seaborn's 'jointplot' to accomplish this.
###Code
ax = sns.jointplot('AI','Porosity', df, kind='kde',shade = False, n_levels = 10,cmap=cmap, shade_lowest = True);
###Output
_____no_output_____
###Markdown
Seaborn's jointplot also reports the correlation coefficient and the p-value of the correlation coefficient (the correlation is deemed significant when the p-value falls below the chosen significance level $\alpha$). Calculating Conditional Statistics Of course, we could just calculate the conditional statistics by hand. We need to select some bins over the variable that we will condition to. Let's calculate conditional statistics of porosity given acoustic impedance. We will select 9 equally spaced bins.
###Code
AI_bins = np.linspace(2000,8000,10) # set the bin boundaries and then the centroids for plotting
AI_centroids = np.linspace((AI_bins[0]+AI_bins[1])*0.5,(AI_bins[8]+AI_bins[9])*0.5,9)
print(AI_bins) # check the boundaries
print(AI_centroids) # check the centroids
df['AI_bins'] = pd.cut(df['AI'], AI_bins,labels = AI_centroids) # cut on boundaries and label with centroids
df.head() # check the new column in the DataFrame
###Output
[2000. 2666.66666667 3333.33333333 4000. 4666.66666667
5333.33333333 6000. 6666.66666667 7333.33333333 8000. ]
[2333.33333333 3000. 3666.66666667 4333.33333333 5000.
5666.66666667 6333.33333333 7000. 7666.66666667]
###Markdown
Now we can use the 'groupby' function built into Pandas' DataFrames to extract the subset of porosity values in each bin from the DataFrame and then to calculate the conditional statistics: expectation, P90 and P10. Let's plot the result.
###Code
cond_exp = df.groupby('AI_bins')['Porosity'].mean()
cond_P90 = df.groupby('AI_bins')['Porosity'].quantile(.9)
cond_P10 = df.groupby('AI_bins')['Porosity'].quantile(.1)
plt.subplot(111)
plt.plot(AI_centroids,cond_exp,color='black')
plt.plot(AI_centroids,cond_P90,'k--',linewidth = 1.0)
plt.plot(AI_centroids,cond_P10,'k--',linewidth = 1.0)
plt.xlabel('Acoustic Impedance (m/s x g/cm^3)')
plt.ylabel('Porosity (fraction) | Acoustic Impedance')
t = plt.title('Porosity Conditional to Accoustic Impedance')
plt.ylim(pormin,pormax)
plt.xlim(AImin,AImax)
plt.text(3200, .10, 'P10')
plt.text(3200, .15, 'Expectation')
plt.text(3200, .19, 'P90')
plt.grid(True)
plt.subplots_adjust(left=0.0, bottom=0.0, right=1.2, top=1.2, wspace=0.2, hspace=0.2)
plt.show()
###Output
_____no_output_____ |
deeplearning1/nbs/char-rnn.ipynb | ###Markdown
Setup We haven't really looked into the detail of how this works yet - so this is provided for self-study for those who are interested. We'll look at it closely next week.
###Code
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read().lower()
print('corpus length:', len(text))
!tail -n 25 {path}
chars = sorted(list(set(text)))
vocab_size = len(chars)+1
print('total chars:', vocab_size)
chars.insert(0, "\0")
''.join(chars[1:-6])
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
idx = [char_indices[c] for c in text]
idx[:10]
''.join(indices_char[i] for i in idx[:70])
###Output
_____no_output_____
###Markdown
Preprocess and create model
###Code
maxlen = 40
sentences = []
next_chars = []
for i in range(0, len(idx) - maxlen+1):
sentences.append(idx[i: i + maxlen])
next_chars.append(idx[i+1: i+maxlen+1])
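# each target sequence is the input window shifted one character ahead,
# e.g. the input idx[0:40] is paired with the target idx[1:41]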
print('nb sequences:', len(sentences))
sentences = np.concatenate([[np.array(o)] for o in sentences[:-2]])
next_chars = np.concatenate([[np.array(o)] for o in next_chars[:-2]])
sentences.shape, next_chars.shape
n_fac = 24
model=Sequential([
Embedding(vocab_size, n_fac, input_length=maxlen),
LSTM(units=512, input_shape=(n_fac,),return_sequences=True, dropout=0.2, recurrent_dropout=0.2,
implementation=2),
Dropout(0.2),
LSTM(512, return_sequences=True, dropout=0.2, recurrent_dropout=0.2,
implementation=2),
Dropout(0.2),
TimeDistributed(Dense(vocab_size)),
Activation('softmax')
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
###Output
_____no_output_____
###Markdown
Train
###Code
def print_example():
seed_string="ethics is a basic foundation of all that"
for i in range(320):
x=np.array([char_indices[c] for c in seed_string[-40:]])[np.newaxis,:] # [-40] picks up the last 40 chars
preds = model.predict(x, verbose=0)[0][-1] # [-1] picks up the last char
preds = preds/np.sum(preds)
next_char = choice(chars, p=preds)
seed_string = seed_string + next_char
print(seed_string)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.optimizer.lr=0.001
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.optimizer.lr=0.0001
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.save_weights('data/char_rnn.h5')
model.optimizer.lr=0.00001
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
print_example()
model.save_weights('data/char_rnn.h5')
###Output
_____no_output_____
###Markdown
Table of Contents: 0.1 Setup, 0.2 Preprocess and create model, 0.3 Train. _A character-level language model_
###Code
%matplotlib inline
import imp
import utils
from utils import *
from __future__ import division, print_function
from keras.layers import TimeDistributed,Activation
from numpy.random import choice
###Output
C:\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
###Markdown
Setup We haven't really looked into the detail of how this works yet - so this is provided for self-study for those who are interested. We'll look at it closely next week.
###Code
path = get_file('nietzsche.txt',origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt") # Nietzsche's writings
text = open(path).read().lower()
print("语料库长度:",len(text))
#path = 'data/wiki/'
#text = open(path+'small.txt').read().lower()
#print('corpus length:', len(text))
#text = text[0:1000000]
# the character-level language model's vocabulary: only the unique characters (see Andrew Ng's course for details)
chars = sorted(list(set(text)))
vocab_size = len(chars) + 1
print('total chars:',vocab_size)
chars.insert(0,"\0")
''.join(chars[1:-6])
char_indices = dict((c,i) for i,c in enumerate(chars)) # character-to-index mapping
indices_char = dict((i, c) for i, c in enumerate(chars)) # index-to-character mapping
idx = [char_indices[c] for c in text] # convert the corpus into an array of integer indices
idx[:10]
# convert the indices back into characters
''.join(indices_char[i] for i in idx[:70])
###Output
_____no_output_____
###Markdown
Preprocess and create model
###Code
maxLen = 40 # maximum sequence length
sentences = [] # input sequences
next_chars = [] # target sequences (the next characters)
len(idx)-maxLen+1
for i in range(0,len(idx)-maxLen+1):
sentences.append(idx[i:i+maxLen])
    next_chars.append(idx[i+1:i+maxLen+1]) # target: the same window shifted one character ahead
print('nb sequences:', len(sentences))
sentences = np.concatenate([[np.array(o)] for o in sentences[:-2]]) # concatenate into a NumPy array
next_chars = np.concatenate([[np.array(o)] for o in next_chars[:-2]])
sentences.shape,next_chars.shape
n_fac = 24 # embedding output dimension
# return_sequences: boolean; whether to return the full output sequence or only the last output
# dropout: float between 0 and 1; fraction of the units to drop for the linear transformation of the inputs
# recurrent_dropout: float between 0 and 1; fraction of the units to drop for the linear transformation of the recurrent state
# implementation: implementation mode, 1 or 2. Mode 1 structures its operations as a larger number of
# smaller dot products and additions, whereas mode 2 batches them into fewer, larger operations.
# These modes have different performance profiles on different hardware and for different applications.
model = Sequential([
Embedding(input_dim=vocab_size,output_dim=n_fac,input_length=maxLen),
LSTM(units=512,return_sequences=True,dropout=0.2,recurrent_dropout=0.2,implementation=2,input_shape=[None,n_fac]),
Dropout(0.2),
LSTM(units=512,return_sequences=True,dropout=0.2,recurrent_dropout=0.2,implementation=2),
Dropout(0.2),
TimeDistributed(Dense(vocab_size)),
Activation('softmax')
])
model.compile(loss='sparse_categorical_crossentropy',optimizer=Adam())
# model=Sequential([
# Embedding(vocab_size, n_fac, input_length=maxLen),
# LSTM(512, input_dim=n_fac,return_sequences=True, dropout_U=0.2, dropout_W=0.2,
# consume_less='gpu'),
# Dropout(0.2),
# LSTM(512, return_sequences=True, dropout_U=0.2, dropout_W=0.2,
# consume_less='gpu'),
# Dropout(0.2),
# TimeDistributed(Dense(vocab_size)),
# Activation('softmax')
# ])
###Output
C:\Anaconda3\lib\site-packages\ipykernel_launcher.py:4: UserWarning: The `input_dim` and `input_length` arguments in recurrent layers are deprecated. Use `input_shape` instead.
after removing the cwd from sys.path.
C:\Anaconda3\lib\site-packages\ipykernel_launcher.py:4: UserWarning: Update your `LSTM` call to the Keras 2 API: `LSTM(512, return_sequences=True, input_shape=(None, 24), dropout=0.2, recurrent_dropout=0.2, implementation=2)`
after removing the cwd from sys.path.
C:\Anaconda3\lib\site-packages\ipykernel_launcher.py:7: UserWarning: Update your `LSTM` call to the Keras 2 API: `LSTM(512, return_sequences=True, dropout=0.2, recurrent_dropout=0.2, implementation=2)`
import sys
###Markdown
Train
###Code
# shape checks: np.expand_dims adds a trailing axis so the targets match the TimeDistributed output
next_chars.shape
np.expand_dims(next_chars,-1).shape
sentences.shape
# sampling loop: repeatedly feed the last 40 characters and draw the next character from the predicted distribution
def print_example():
seed_string="ethics is a basic foundation of all that"
for i in range(320):
x=np.array([char_indices[c] for c in seed_string[-40:]])[np.newaxis,:]
preds = model.predict(x, verbose=0)[0][-1]
preds = preds/np.sum(preds)
next_char = choice(chars, p=preds)
seed_string = seed_string + next_char
print(seed_string)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64,epochs=1)
print_example()
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64,epochs=1)
print_example()
model.optimizer.lr=0.001
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.optimizer.lr=0.0001
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.save_weights('data/char_rnn.h5')
model.optimizer.lr=0.00001
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64,epochs=1)
print_example()
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
print_example()
model.save_weights('data/char_rnn.h5')
###Output
_____no_output_____
###Markdown
Setup We haven't really looked into the detail of how this works yet - so this is provided for self-study for those who are interested. We'll look at it closely next week.
###Code
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read().lower()
print('corpus length:', len(text))
!tail {path} -n25
#path = 'data/wiki/'
#text = open(path+'small.txt').read().lower()
#print('corpus length:', len(text))
#text = text[0:1000000]
chars = sorted(list(set(text)))
vocab_size = len(chars)+1
print('total chars:', vocab_size)
chars.insert(0, "\0")
''.join(chars[1:-6])
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
idx = [char_indices[c] for c in text]
idx[:10]
''.join(indices_char[i] for i in idx[:70])
###Output
_____no_output_____
###Markdown
Preprocess and create model
###Code
maxlen = 40
sentences = []
next_chars = []
for i in range(0, len(idx) - maxlen+1):
sentences.append(idx[i: i + maxlen])
next_chars.append(idx[i+1: i+maxlen+1])
print('nb sequences:', len(sentences))
sentences = np.concatenate([[np.array(o)] for o in sentences[:-2]])
next_chars = np.concatenate([[np.array(o)] for o in next_chars[:-2]])
sentences.shape, next_chars.shape
n_fac = 24
model=Sequential([
Embedding(vocab_size, n_fac, input_length=maxlen),
LSTM(512, input_dim=n_fac,return_sequences=True, dropout_U=0.2, dropout_W=0.2,
consume_less='gpu'),
Dropout(0.2),
LSTM(512, return_sequences=True, dropout_U=0.2, dropout_W=0.2,
consume_less='gpu'),
Dropout(0.2),
TimeDistributed(Dense(vocab_size)),
Activation('softmax')
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
###Output
_____no_output_____
###Markdown
Train
###Code
def print_example():
seed_string="ethics is a basic foundation of all that"
for i in range(320):
x=np.array([char_indices[c] for c in seed_string[-40:]])[np.newaxis,:]
preds = model.predict(x, verbose=0)[0][-1]
preds = preds/np.sum(preds)
next_char = choice(chars, p=preds)
seed_string = seed_string + next_char
print(seed_string)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
# Use K.set_value to change the learning rate; assigning a float to
# model.optimizer.lr only rebinds the attribute and is silently ignored.
from keras import backend as K
K.set_value(model.optimizer.lr, 0.001)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
K.set_value(model.optimizer.lr, 0.0001)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.save_weights('data/char_rnn.h5')
K.set_value(model.optimizer.lr, 0.00001)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
print_example()
model.save_weights('data/char_rnn.h5')
###Output
_____no_output_____
###Markdown
Setup We haven't really looked into the detail of how this works yet - so this is provided for self-study for those who are interested. We'll look at it closely next week.
###Code
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt", cache_subdir='datasets')
text = open(path).read().lower()
print('corpus length:', len(text))
!tail {path} -n25
#path = 'data/wiki/'
#text = open(path+'small.txt').read().lower()
#print('corpus length:', len(text))
#text = text[0:1000000]
chars = sorted(list(set(text)))
vocab_size = len(chars)+1
print('total chars:', vocab_size)
chars.insert(0, "\0")
''.join(chars[1:-6])
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
idx = [char_indices[c] for c in text]
idx[:10]
''.join(indices_char[i] for i in idx[:70])
###Output
_____no_output_____
###Markdown
Preprocess and create model
###Code
maxlen = 40
sentences = []
next_chars = []
for i in range(0, len(idx) - maxlen+1):
sentences.append(idx[i: i + maxlen])
next_chars.append(idx[i+1: i+maxlen+1])
print('nb sequences:', len(sentences))
sentences = np.concatenate([[np.array(o)] for o in sentences[:-2]])
next_chars = np.concatenate([[np.array(o)] for o in next_chars[:-2]])
sentences.shape, next_chars.shape
n_fac = 24
model=Sequential([
Embedding(vocab_size, n_fac, input_length=maxlen),
BatchNormalization(),
LSTM(512, input_dim=n_fac, return_sequences=True, dropout_U=0.2, dropout_W=0.2,
consume_less='gpu'),
Dropout(0.2),
LSTM(512, return_sequences=True, dropout_U=0.2, dropout_W=0.2,
consume_less='gpu'),
Dropout(0.2),
TimeDistributed(Dense(512, activation='relu')),
Dropout(0.2),
TimeDistributed(Dense(vocab_size, activation='softmax'))
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
###Output
_____no_output_____
###Markdown
Train
###Code
def run_epochs(n, batch_size=64, x=sentences, y=np.expand_dims(next_chars,-1)):
mx = len(x)//batch_size*batch_size
for i in range(n):
model.reset_states()
        h = model.fit(x[:mx], y[:mx], batch_size=batch_size, epochs=1, shuffle=False, verbose=0)
print(h.history['loss'])
def print_example():
seed_string="ethics is a basic foundation of all that"
for i in range(320):
x=np.array([char_indices[c] for c in seed_string[-40:]])[np.newaxis,:]
preds = model.predict(x, verbose=0)[0][-1]
preds = preds/np.sum(preds)
next_char = choice(chars, p=preds)
seed_string = seed_string + next_char
print(seed_string)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
# K.set_value actually updates the optimizer's learning-rate variable;
# assigning a float to model.optimizer.lr is silently ignored.
from keras import backend as K
K.set_value(model.optimizer.lr, 0.001)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
K.set_value(model.optimizer.lr, 0.0001)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.save_weights('data/char_rnn.h5')
K.set_value(model.optimizer.lr, 0.00001)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
print_example()
model.save_weights('data/char_rnn.h5')
###Output
_____no_output_____
###Markdown
Setup We haven't really looked into the detail of how this works yet - so this is provided for self-study for those who are interested. We'll look at it closely next week.
###Code
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read().lower()
print('corpus length:', len(text))
!tail {path} -n25 #25 means last 25 lines of the thing
#path = 'data/wiki/'
#text = open(path+'small.txt').read().lower()
#print('corpus length:', len(text))
#text = text[0:1000000]
chars = sorted(list(set(text)))
vocab_size = len(chars)+1
print('total chars:', vocab_size)
chars.insert(0, "\0")
''.join(chars[1:-6])
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
idx = [char_indices[c] for c in text]
idx[:10]
''.join(indices_char[i] for i in idx[:70])
###Output
_____no_output_____
###Markdown
Preprocess and create model
###Code
maxlen = 40
sentences = []
next_chars = []
for i in range(0, len(idx) - maxlen+1):
sentences.append(idx[i: i + maxlen])
next_chars.append(idx[i+1: i+maxlen+1])
print('nb sequences:', len(sentences))
sentences = np.concatenate([[np.array(o)] for o in sentences[:-2]])
next_chars = np.concatenate([[np.array(o)] for o in next_chars[:-2]])
sentences.shape, next_chars.shape
n_fac = 24
model=Sequential([
Embedding(vocab_size, n_fac, input_length=maxlen),
LSTM(512, input_dim=n_fac,return_sequences=True, dropout_U=0.2, dropout_W=0.2,
consume_less='gpu'),
Dropout(0.2),
LSTM(512, return_sequences=True, dropout_U=0.2, dropout_W=0.2,
consume_less='gpu'),
Dropout(0.2),
TimeDistributed(Dense(vocab_size)),
Activation('softmax')
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
###Output
_____no_output_____
###Markdown
Train
###Code
def print_example():
seed_string="ethics is a basic foundation of all that"
for i in range(320):
x=np.array([char_indices[c] for c in seed_string[-40:]])[np.newaxis,:]
preds = model.predict(x, verbose=0)[0][-1]
preds = preds/np.sum(preds)
next_char = choice(chars, p=preds)
seed_string = seed_string + next_char
print(seed_string)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
# K.set_value actually updates the optimizer's learning-rate variable;
# assigning a float to model.optimizer.lr is silently ignored.
from keras import backend as K
K.set_value(model.optimizer.lr, 0.001)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
K.set_value(model.optimizer.lr, 0.0001)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.save_weights('data/char_rnn.h5')
K.set_value(model.optimizer.lr, 0.00001)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
print_example()
model.save_weights('data/char_rnn.h5')
###Output
_____no_output_____ |
DP/Gamblers Problem.ipynb | ###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
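Concretely, with the reward defined above and no discounting, value iteration repeatedly applies the Bellman optimality backup to every capital level s; this is a worked form of the update that the implementations below compute in their `one_step_lookahead` helpers:

$$V(s) \leftarrow \max_{a \in \{1,\dots,\min(s,\,100-s)\}} \; p_h\left[r(s+a) + V(s+a)\right] + (1-p_h)\left[r(s-a) + V(s-a)\right]$$

where r(s') is 1 if s' = 100 and 0 otherwise, and the terminal values V(0) = V(100) = 0 stay fixed.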
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
V = np.zeros(101)
rewards = np.zeros(101)
rewards[100] = 1
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
A = np.zeros(101)
stakes = range(1, min(s, 100-s)+1) # Your minimum bet is 1, maximum bet is min(s, 100-s).
for a in stakes:
# rewards[s+a], rewards[s-a] are immediate rewards.
# V[s+a], V[s-a] are values of the next states.
# This is the core of the Bellman equation: The expected value of your action is
# the sum of immediate rewards and the value of the next state.
A[a] = p_h * (rewards[s+a] + V[s+a]*discount_factor) + (1-p_h) * (rewards[s-a] + V[s-a]*discount_factor)
return A
# Start with a random policy
    policy = [np.arange(1+s)/(1+s) for s in range(101)]
while True:
# Evaluate the current policy
for i in range(2):
V = np.array([max(one_step_lookahead(s, V, rewards)) for s in range(101)])
# Will be set to false if we make any changes to the policy
policy_stable = True
# For each state...
for s in range(101):
# The best action we would take under the current policy
chosen_a = np.argmax(policy[s])
# Find the best action by one-step lookahead
            # Ties are resolved arbitrarily
action_values = one_step_lookahead(s, V, rewards)
best_a = np.argmax(action_values)
# Greedily update the policy
if chosen_a != best_a:
policy_stable = False
policy[s] = np.eye(101)[best_a]
        # If the policy is stable we've found an optimal policy; return it
        if policy_stable:
            return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
states = np.arange(101)
stakes = [np.argmax(policy[s]) for s in states]
plt.plot(states, stakes)
plt.show()
print(stakes)
plt.plot(states, v)
plt.show()
print(v)
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.001, discount_factor=0.3):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def policy_evaluation(V, p):
        reward = np.zeros(101)
reward[100] = 1
z = 0
while True:
delta = 0
r_actions = []
for i, state in enumerate(V):
if (i == 0 or i == 100):
continue
r_actions = one_step_lookahead(i, V, reward)
# print(i, r_actions)
best_action = np.max(r_actions)
delta = max(delta, best_action - V[i])
V[i] = best_action
z += 1
if (delta < theta):
break
return V
    def policy_greedy(V, policy, p_h):
        P1 = []
        reward = np.zeros(101)
        reward[100] = 1
        for i in range(1, 100):
            p = [0] * 100
            # best/val must be reset for every state, otherwise the best stake
            # found for an earlier state leaks into later ones
            best = 0
            val = 0
            for j in range(1, min(100 - i, i) + 1):
                tmp = p_h * (reward[min(100, i + j)] + discount_factor * V[min(100, i + j)]) \
                    + (1 - p_h) * (reward[i - j] + discount_factor * V[i - j])
                if tmp > val:
                    val = tmp
                    best = j
            p[best] = 1
            P1.append(p)
        return P1
def one_step_lookahead(s, V, reward):
"""
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
A = []
i = s
for j in range(1, min(s, 100 -s) + 1):
win = min(i + j, 100)
lose = i - j
tmp = (p_h * (reward[win] + discount_factor * V[win]) + (1 - p_h) * (reward[lose] + discount_factor * V[lose]))
A.append(tmp)
# print("A", A)
return A
V = np.zeros((101))
policy = np.zeros((99, 99))
# for i, state in enumerate(V):
# for j, action in enumerate(policy[i][:i + 1]):
# if (i < 100):
# policy[i][j] = 1 / len(policy[:i + 1])
# policy[0][0] = 1
# print(policy)
V = policy_evaluation(V, p_h)
policy = policy_greedy(V, policy, p_h)
# V = policy_evaluation(V, policy, p_h)
# Implement!
return policy, V
policy, v = value_iteration_for_gamblers(p_h = 0.55)
print("Optimized Policy:")
print(policy[1])
print("")
print("Optimized Value Function:")
print(v[:100].reshape((10,10)))
print(v[100])
print("")
# Plotting Final Policy (action stake) vs State (Capital)
import matplotlib.pyplot as plt
plt.plot(v[:100])
plt.show()
# Implement!
# Plotting Capital vs Final Policy
X = [x for x in range(99)]
Y = [np.argmax(policy[x]) for x in X]
plt.bar(X, Y)
# Implement!
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
A = np.zeros(101)
for a in range(1,min(s, 100 - s)+1):
A[a] = p_h * (rewards[a+s] + V[a+s]) + (1 - p_h) * (rewards[s-a] + V[s-a])
return A
rewards = np.zeros(101)
rewards[100] = 1
V = np.zeros(101)
policy = np.zeros(100)
while True:
delta = 0
for s in range(1,100):
A = one_step_lookahead(s, V, rewards)
delta = max(delta, abs(V[s] - max(A)))
V[s] = max(A)
if delta < theta:
break
for s in range(1,100):
A = one_step_lookahead(s, V, rewards)
policy[s] = np.argmax(A)
return policy, V
policy, v = value_iteration_for_gamblers(0.55)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
plt.plot(v[:100])
plt.xlabel("State")
plt.ylabel("Value")
# Implement!
# Plotting Capital vs Final Policy
plt.plot(policy)
plt.xlabel("State")
plt.ylabel("Best Policy")
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
        # One possible implementation, mirroring the solutions elsewhere in
        # this notebook: consider every legal stake and back up its expected value.
        A = np.zeros(101)
        for a in range(1, min(s, 100 - s) + 1):
            A[a] = p_h * (rewards[s + a] + discount_factor * V[s + a]) \
                 + (1 - p_h) * (rewards[s - a] + discount_factor * V[s - a])
        return A

    rewards = np.zeros(101)
    rewards[100] = 1
    V = np.zeros(101)
    policy = np.zeros(100)
    # Sweep all states until the value function changes by less than theta
    while True:
        delta = 0
        for s in range(1, 100):
            A = one_step_lookahead(s, V, rewards)
            delta = max(delta, abs(np.max(A) - V[s]))
            V[s] = np.max(A)
        if delta < theta:
            break
    # Extract a deterministic policy from the converged value function
    for s in range(1, 100):
        policy[s] = np.argmax(one_step_lookahead(s, V, rewards))
    return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
plt.plot(v[:100])
plt.xlabel('Capital')
plt.ylabel('Value Estimates')
plt.show()
# Plotting Capital vs Final Policy
plt.bar(range(100), policy, align='center', alpha=0.5)
plt.xlabel('Capital')
plt.ylabel('Final policy (stake)')
plt.show()
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
max_stake = min(s, 100-s)
A = np.zeros(101)
# iterate over possible actions
for stake in range(1,max_stake+1):
# tuple: (prob, next_state, reward, done)
# if win:
s_win = (p_h, s+stake, int(s+stake == 100), s+stake == 100)
# if lose:
s_lose = (1-p_h, s-stake, 0, s-stake == 0)
outcomes = [s_win, s_lose]
for trans_prob, next_state, reward, done in outcomes:
A[stake] += trans_prob * (reward + discount_factor * V[next_state])
return A
goal = 100
policy = np.zeros(goal)
V = np.zeros(goal+1)
    while True:
        delta = 0
        # iterate over each state
        for s in range(1, goal):
            old_v = V[s]
            # get q values for each action (stake) available from current state
            qs = one_step_lookahead(s, V)
            # set new value estimate
            V[s] = np.max(qs)
            # revise max change in value estimate
            delta = max(delta, abs(V[s] - old_v))
        if delta < theta:
            break
    # create the final policy once, using the converged value function
    for s in range(1, goal):
        qs = one_step_lookahead(s, V)
        policy[s] = np.argmax(qs)
    return policy, V
policy, v = value_iteration_for_gamblers(0.5)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
plt.plot(range(100), v[:100])
plt.xlabel('Capital')
plt.ylabel('Value Estimates')
plt.title('Final Policy (action stake) vs State (Capital)')
plt.show()
# Plotting Capital vs Final Policy
# x axis values
x = range(100)
# corresponding y axis values
y = policy
# plotting the bars
plt.bar(x, y, align='center', alpha=0.5)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Final policy (stake)')
# giving a title to the graph
plt.title('Capital vs Final Policy')
# function to show the plot
plt.show()
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
A = np.zeros(101)
for a in range(1, min(s, 100-s)+1):
A[a] = p_h * (rewards[s+a] + discount_factor*V[s+a]) + (1-p_h) * (rewards[s-a] + discount_factor*V[s-a])
return A
rewards = np.zeros(101)
rewards[100] = 1
V = np.zeros(101)
policy = np.zeros(101)
while True:
delta = 0
for s in range(1, 101):
A = one_step_lookahead(s, V, rewards)
v = np.max(A)
delta = max(delta, np.abs(V[s]-v))
V[s] = v
if delta < theta:
break
for s in range(1, 101):
A = one_step_lookahead(s, V, rewards)
policy[s] = np.argmax(A)
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
plt.plot(v[:100])
plt.xlabel('Capital')
plt.ylabel('Value Estimates')
plt.show()
# Plotting Capital vs Final Policy
plt.bar(range(101), policy, align='center', alpha=0.5)
plt.xlabel('Capital')
plt.ylabel('Final policy (stake)')
plt.show()
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
A = np.zeros(101)
for a in range(1, min(s, 100-s) + 1):
A[a] += p_h * (rewards[s+a] + V[s+a]*discount_factor) + (1-p_h) * (rewards[s-a] + V[s-a]*discount_factor)
return A
# Implement!
rewards = np.zeros(101)
rewards[100] = 1
V = np.zeros(101)
while True:
# Stopping condition
delta = 0
# Update each state...
for s in range(1, 100):
# Do a one-step lookahead to find the best action
A = one_step_lookahead(s, V, rewards)
best_action_value = np.max(A)
# Calculate delta across all states seen so far
delta = max(delta, np.abs(best_action_value - V[s]))
# Update the value function. Ref: Sutton book eq. 4.10.
V[s] = best_action_value
# Check if we can stop
if delta < theta:
break
# Create a deterministic policy using the optimal value function
policy = np.zeros([100, 100])
for s in range(1, 100):
# One step lookahead to find the best action for this state
A = one_step_lookahead(s, V, rewards)
best_action = np.argmax(A)
# Always take the best action
policy[s, best_action] = 1.0
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
# Plotting Final Policy (action stake) vs State (Capital)
# x axis values
x = range(100)
# corresponding y axis values
y = v[:100]
# plotting the points
plt.plot(x, y)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Value Estimates')
# giving a title to the graph
plt.title('Final Policy (action stake) vs State (Capital)')
# function to show the plot
plt.show()
# Plotting Capital vs Final Policy
# Implement!
# x axis values
x = range(100)
# corresponding y axis values
y = np.argmax(policy, axis=1)  # policy is one-hot per state; plot the chosen stake
# plotting the bars
plt.bar(x, y, align='center', alpha=0.5)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Final policy (stake)')
# giving a title to the graph
plt.title('Capital vs Final Policy')
# function to show the plot
plt.show()
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
# states are 0 to 100, 0 and 100 are terminal
# agent can play between states 1 and 99
# only state 100 has reward of 1
# state values are initialized as 0.
states = list(range(0,101,1))
actions = list(range(1,99,1))
rewards = np.append(np.zeros(100),1)
#depending on state an action there are two possible outcomes
def transition(state, selected_action):
return [state+selected_action, state-selected_action]
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
# print("state is", s)
stakes = range(1, min(s, 100-s)+1)
# print("possible actions ",stakes)
action_values = np.zeros(101)
for action in stakes:
# print ("in state ",s," evaluating action : ", action)
# for prob, next_state, reward, done in env.P[state][action]:
# action_values[action] += prob * (reward + discount_factor * V[next_state])
outcomes = transition(s,action)
# print ("possile outcomes to be updated are ",outcomes)
# print ("reward of heads :", rewards[outcomes[0]])
action_values[action] = p_h * (rewards[outcomes[0]]+ discount_factor * V[outcomes[0]])
action_values[action] += (1-p_h) * (rewards[outcomes[1]] + discount_factor * V[outcomes[1]])
# print("action values for 1 step lookahead : ", action_values)
return action_values
V = np.zeros(101)
# policy is, for each state, how much to bet. So for each state from 1-99, the action to be taken.
# For implementational ease state 0 is included too, it should be 0 so can be used to check too.
policy = np.zeros([100])
count = 0
    # optional: keep each state's action values so they can be plotted later
action_values_for_states = [0] * 101
while True:
count += 1
change = 0
for state in range(100):
action_values = one_step_lookahead(state, V,rewards)
highest_action_value = max(action_values)
change = max(change, abs( highest_action_value - V[state]))
V[state] = highest_action_value
policy[state] = np.argmax(action_values)
action_values_for_states[state] = action_values
if change < theta:
break
print ('the agent found the policy in ', count, 'iterations')
return policy, V, action_values_for_states
def action_value_plotter(action_values, state, probability):
"""
takes action values of a state and plots it
"""
x = list(range(0,101,1))
y = action_values
plt.plot(x, y, 'g')
plt.xlabel('Actions')
plt.ylabel('Action Values')
tit = 'When agent has '+ str(state)+ ' and when the p(heads) is '+ str(probability*100)+ '%'
plt.title(tit)
plt.show()
def policy_plotter(policy, probability):
"""
takes the policy and probability then plots it
"""
x = list(range(0,100,1))
y = policy
plt.bar(x, y, color='r', align='center', alpha=0.5)
plt.xlabel('State')
plt.ylabel('Stake')
    tit = 'Policy (State -> Stake) for p(X=heads) = ' + str(probability)
plt.title(tit)
plt.show()
def state_value_plotter(v, probability):
"""
takes state values then plots it
"""
x = list(range(0,101,1))
y = v
plt.plot(x, y)
plt.xlabel('State')
plt.ylabel('Value')
    tit = 'State Values for p(X=heads) = ' + str(probability)
plt.title(tit)
# function to show the plot
plt.show()
probability_heads = 0.25
policy, v, action_values = value_iteration_for_gamblers(probability_heads)
action_value_plotter(action_values[75],75,probability_heads)
policy_plotter(policy,probability_heads)
state_value_plotter(v, probability_heads)
probability_heads = 0.55
policy, v, action_values = value_iteration_for_gamblers(probability_heads)
action_value_plotter(action_values[75],75,probability_heads)
policy_plotter(policy,probability_heads)
state_value_plotter(v, probability_heads)
probability_heads = 0.90
policy, v, action_values = value_iteration_for_gamblers(probability_heads)
action_value_plotter(action_values[75],75,probability_heads)
policy_plotter(policy,probability_heads)
state_value_plotter(v, probability_heads)
###Output
the agent found the policy in 26 iterations
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
A = np.zeros(min(s, 100 - s) + 1)
for a in range(1, min(s, 100 - s) + 1):
A[a] = p_h * (V[s + a] * discount_factor + rewards[s + a]) \
+ (1 - p_h) * (V[s - a] * discount_factor + rewards[s - a])
# Implement!
#print("Current s: {}".format(s))
#print(A)
return A
goal = 100
V = np.zeros(goal + 1)
rewards = np.zeros(goal + 1)
rewards[goal] = 1.0
# Implement!
while True:
delta = 0
for s in range(1, goal):
A = one_step_lookahead(s, V, rewards)
best_value = np.max(A)
delta = max(delta, abs(V[s] - best_value))
V[s] = best_value
print('iter')
if delta < theta:
break
policy = np.zeros(goal)
for s in range(goal):
A = one_step_lookahead(s, V, rewards)
a = np.argmax(A)
policy[s] = a
return policy, V
policy, v = value_iteration_for_gamblers(0.4)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
# Implement!
v = v[:100]
plt.plot(v)
plt.show()
# Plotting Capital vs Final Policy
# Implement!
x = range(100)
y = policy
plt.bar(x, y, align='center', alpha=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
# Import libraries
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
# Implement!
return A
# Implement!
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
# Implement!
# Plotting Capital vs Final Policy
# Implement!
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
# Implement!
A = np.zeros_like(V)
can_bet_n = min(s, 100 - s)
for bet in range(1, can_bet_n + 1):
A[bet] = p_h * (rewards[min(s + bet, 100)] + discount_factor * V[min(s + bet, 100)]) + \
(1 - p_h) * (rewards[s - bet] + discount_factor * V[s - bet])
return A
# Implement!
policy = np.ones(101)
V = np.zeros(101)
rewards = np.zeros(101)
rewards[100] = 1
while True:
v_next_state = np.zeros_like(V)
distance = 0
for s in range(1, 1 + 100):
A = one_step_lookahead(s, V, rewards)
v_next_state[s] = np.max(A)
distance = max(distance, np.abs(V[s] - v_next_state[s]))
if distance < theta:
break
else:
V = v_next_state.copy()
for s in range(1, 1 + 100):
A = one_step_lookahead(s, V, rewards)
policy[s] = np.argmax(A)
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
plt.figure(figsize=(5, 4), dpi=170)
plt.plot(range(len(v)), v)
# Implement!
# Plotting Capital vs Final Policy
plt.figure(figsize=(5, 4), dpi=170)
plt.bar(range(len(policy)), policy)
# Implement!
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.gambler import GamblerEnv
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(env, theta=0.0001, discount_factor=1.0):
# Implement!
# value iteration for gambler
V = np.zeros(env.nS)
V[env.nS-1] = 1.0
policy = np.zeros([env.nS, env.nA+1])
def one_step_lookahead(env, s, V):
A = np.zeros(env.nA+1)
for _, a in enumerate(env.P[s]):
for prob, next_state, reward, done in env.P[s][a]:
A[a] += prob*(reward + discount_factor*V[next_state])
return A
# 1. value iteration
while True:
delta = 0
for s in range(1, env.nS-1):
# print("env state: {}, actions {}".format(s, env.P[s]))
A = one_step_lookahead(env, s, V)
best_action_value = np.max(A)
delta = max(delta, np.abs(best_action_value - V[s]))
V[s] = best_action_value
if delta < theta:
break
# 2. output policy
for s in range(1, env.nS-1):
A = one_step_lookahead(env, s, V)
policy[s] = np.eye(env.nA+1)[np.argmax(A)]
V[s] = np.max(A)
return policy, V
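# The lib.envs.gambler module is not shown here, so as an assumption this is a
# hypothetical minimal stand-in compatible with the loop above: P[s][a] maps to a
# list of (prob, next_state, reward, done) tuples. Rewards are 0.0 because the code
# above seeds V[goal] = 1.0 instead of paying a transition reward; the real
# GamblerEnv may differ.
class SimpleGamblerEnv:
    def __init__(self, p_h, goal=100):
        self.p_h = p_h
        self.nS = goal + 1      # states 0..goal; 0 and goal are terminal
        self.nA = goal // 2     # largest possible stake
        self.P = {}
        for s in range(self.nS):
            self.P[s] = {}
            for a in range(1, min(s, goal - s) + 1):
                win, lose = s + a, s - a
                self.P[s][a] = [(p_h, win, 0.0, win == goal),
                                (1 - p_h, lose, 0.0, lose == 0)]
# e.g. env = SimpleGamblerEnv(0.25) could replace GamblerEnv if the library version is unavailable.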
env = GamblerEnv(0.25)
print(env)
policy, v = value_iteration_for_gamblers(env)
print("Optimized Policy:")
print(policy)
print("")
s = 50
print("Optimized Value Function: v({})={}".format(s, v[s]))
print(v)
print("")
# Implement!
# Plotting Final Policy (action stake) vs State (Capital)
# x axis values
x = range(100)
# corresponding y axis values
y = v[:100]
# plotting the points
plt.plot(x, y)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Value Estimates')
# giving a title to the graph
plt.title('Final Policy (action stake) vs State (Capital)')
# function to show the plot
plt.show()
# Plotting Capital vs Final Policy
# Implement!
# x axis values
x = range(101)
# corresponding y axis values
y = np.argmax(policy, 1)
# plotting the bars
plt.bar(x, y, align='center', alpha=0.5)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Final policy (stake)')
# giving a title to the graph
plt.title('Capital vs Final Policy')
# function to show the plot
plt.show()
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
# Implement!
A = np.zeros(101)
stakes = range(1, min(s, 100-s)+1)
for a in stakes:
A[a] = p_h * (rewards[s + a] + discount_factor * V[s + a]) + (1 - p_h) * (rewards[s - a] + discount_factor * V[s - a])
return A
# Implement!
reward = np.zeros(101)
reward[100] = 1
V = np.zeros(101)
while True:
delta = 0
for s in range(1, 100):
A = one_step_lookahead(s, V, reward)
best_action_value = np.max(A)
delta = max(delta, np.abs(best_action_value - V[s]))
V[s] = best_action_value
if delta < theta:
break
policy = np.zeros(100)
for s in range(1, 100):
A = one_step_lookahead(s, V, reward)
best_action = np.argmax(A)
policy[s] = best_action
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
# Implement!
# Plotting Capital vs Final Policy
# Implement!
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
# Implement!
A = [0 for _ in range(101)]
for i in range(1, min(s,100-s)+1):
A[i] = p_h*(rewards[s+i]+discount_factor*V[s+i])+(1-p_h)*(rewards[s-i]+discount_factor*V[s-i])
return A
# Implement!
rewards = [0 for _ in range(101)]
rewards[100] = 1
V = [0 for _ in range(101)]
while True:
delta = 0
for s in range(1, 100):
A = one_step_lookahead(s, V, rewards)
best_action_value = np.max(A)
delta = max(delta, np.abs(best_action_value-V[s]))
V[s] = best_action_value
if delta<theta:
break
policy = [0 for _ in range(101)]
for s in range(1, 100):
A = one_step_lookahead(s, V, rewards)
policy[s] = np.argmax(A)
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
# Implement!
x = range(100)
y = v[:100]
plt.plot(x,y)
plt.xlabel('Capital')
plt.ylabel('Value')
plt.title('Final Policy (action stake) vs State (Capital)')
plt.show()
# Plotting Capital vs Final Policy
# Implement!
x = range(100)
y = policy[:100]
plt.bar(x, y, align='center', alpha=0.5)
plt.xlabel('capital')
plt.ylabel('final policy')
plt.title('Capital vs Final policy')
plt.show()
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=1e-8, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
actions = np.zeros(min(s + 1, 101 - s))
for a in range(1, len(actions)):
actions[a] = (1 - p_h) * discount_factor * V[s - a]
if a + s >= 100:
actions[a] += p_h
else:
actions[a] += p_h * discount_factor * V[s + a]
return actions
V = np.zeros(100)
while True:
max_delta = 0
for s in range(100):
actions = one_step_lookahead(s, V)
max_delta = max(max_delta, abs(V[s] - np.max(actions)))
V[s] = np.max(actions)
if max_delta < theta:
break
policy = np.zeros(100)
for s in range(100):
actions = one_step_lookahead(s, V)
policy[s] = np.argmax(actions)
return policy, V
policy, v = value_iteration_for_gamblers(0.55)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
plt.plot(np.arange(1, 100), v[1: 100]);
# Plotting Capital vs Final Policy
plt.bar(np.arange(1, 100), policy[1: 100]);
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
na = min(s, 100 - s)
A = np.zeros(na + 1)
# Stake 0 is excluded: it is a self-loop that can never reach the goal.
for a in range(1, na + 1):
A[a] = p_h * (rewards[s + a] + discount_factor * V[s + a]) + (1 - p_h) * (rewards[s - a] + discount_factor * V[s - a])
return A
ns = 101  # states 0..100; the goal state 100 carries the +1 reward
V = np.zeros(ns)
rewards = np.zeros(ns)
rewards[-1] = 1
while True:
delta = 0
for s in range(1, ns - 1):
action_values = one_step_lookahead(s, V, rewards)
best_action_value = np.max(action_values)
delta = max(delta, np.abs(best_action_value - V[s]))
V[s] = best_action_value
if delta < theta:
break
policy = np.zeros((ns, ns))
for s in range(1, ns):
A = one_step_lookahead(s, V, rewards)
best_action = np.argmax(A)
policy[s, best_action] = 1.0
return policy, V
policy, v = value_iteration_for_gamblers(0.55)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
v.shape
# Plotting Final Policy (action stake) vs State (Capital)
# x axis values
x = range(100)
# corresponding y axis values
y = v[:100]
# plotting the points
plt.plot(x, y)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Value Estimates')
# giving a title to the graph
plt.title('Final Policy (action stake) vs State (Capital)')
# function to show the plot
plt.show()
# Plotting Capital vs Final Policy
# x axis values
x = range(101)
# corresponding y axis values
y = np.argmax(policy, 1)
# plotting the bars
plt.bar(x, y, align='center', alpha=0.5)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Final policy (stake)')
# giving a title to the graph
plt.title('Capital vs Final Policy')
# function to show the plot
plt.show()
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
# Implement!
A = np.zeros([100, 1])
for bet in range(1, np.min([s, 100-s])+1):
state_if_head = s + bet
state_if_tail = s - bet
A[bet] = p_h * V[state_if_head] + (1-p_h) * V[state_if_tail]
return A
# Implement!
V = np.zeros([101, 1])
V[100] = 1
policy = np.zeros([100, 1], dtype = np.int8)
while True:
delta = 0
for state in range(1,100):
A = one_step_lookahead(state,V)
best_action = np.argmax(A)
policy[state] = best_action
for state in range(1,100):
A = one_step_lookahead(state, V)
new_value = float(A[int(policy[state])])
delta = max(delta, abs(new_value - float(V[state])))
V[state] = new_value
if delta < theta:
break
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
figure = plt.figure()
ax = figure.subplots()
ax.bar(np.arange(100), policy[:,0])
ax.set_xlabel("Capital")
ax.set_ylabel("Best bet")
# Plotting Capital vs Value Function
figure = plt.figure()
ax = figure.subplots()
ax.plot(np.arange(100), v[0:100])
ax.set_xlabel("Capital")
ax.set_ylabel("Best bet")
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.000001, discount_factor=0.9):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
A = np.zeros(101)
for a in range(0, min(s, 100 - s) + 1):
# heads
A[a] += p_h * (rewards[s + a] + discount_factor * V[s + a])
# tails
A[a] += (1 - p_h) * (rewards[s - a] + discount_factor * V[s - a])
return A
V = np.zeros(101)
rewards = np.zeros(101)
rewards[100] = 1
# find optimal value function
while True:
last_V = V.copy()
V = np.zeros(101)
for s in range(1, 100):
action_values = one_step_lookahead(s, last_V, rewards)
V[s] = max(action_values)
if max(abs(V - last_V)) < theta:
break
# while True:
# # Stopping condition
# delta = 0
# # Update each state...
# for s in range(1, 100):
# # Do a one-step lookahead to find the best action
# A = one_step_lookahead(s, V, rewards)
# # print(s,A,V) # if you want to debug.
# best_action_value = np.max(A)
# # Calculate delta across all states seen so far
# delta = max(delta, np.abs(best_action_value - V[s]))
# # Update the value function. Ref: Sutton book eq. 4.10.
# V[s] = best_action_value
# # Check if we can stop
# if delta < theta:
# break
# find optimal policy (acting greedily on optimal value function)
policy = np.zeros(100)
for s in range(1, 100):
action_values = one_step_lookahead(s, V, rewards)
greedy_action = np.argmax(action_values)
policy[s] = greedy_action
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
# x axis values
x = range(100)
# corresponding y axis values
y = v[:100]
# plotting the points
plt.plot(x, y)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Value Estimates')
# giving a title to the graph
plt.title('Final Policy (action stake) vs State (Capital)')
# function to show the plot
plt.show()
# Plotting Capital vs Final Policy
# x axis values
x = range(100)
# corresponding y axis values
y = policy
# plotting the bars
plt.bar(x, y, align='center', alpha=0.5)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Final policy (stake)')
# giving a title to the graph
plt.title('Capital vs Final Policy')
# function to show the plot
plt.show()
###Output
_____no_output_____
###Markdown
This is Example 4.3, Gambler’s Problem, from Sutton's book. A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}. The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1. In short: the state is the gambler's current capital, the action is how much to stake, the reward is 1 only when the goal of winning $100 is reached (0 otherwise), and the policy is a mapping from capital to stake. The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
# rewards
rewards = np.zeros(101)
rewards[100] = 1 # reward for each of the 101 states; only reaching 100 pays
# We introduce two dummy states corresponding to termination with capital of 0 and 100
V = np.zeros(101) # states 0 and 100 are terminal
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
# Implement!
# actions
stakes = range(1, min(s, 100-s)+1)
# action-value table
A = np.zeros(101)
for a in stakes:
# next states are s+a or s-a.
# reward[s+a] and reward[s-a] are immediate reward.
# V[s+a] and V[s-a] are next state value.
A[a] = p_h * (rewards[s+a] + discount_factor*V[s+a]) + (1-p_h) * (rewards[s-a] + discount_factor*V[s-a])
return A
# Implement!
while True:
delta = 0
for s in range(1, 100):
# evaluate the actions
A = one_step_lookahead(s, V, rewards)
best_act_value = np.max(A)
delta = max(delta, np.abs(best_act_value-V[s])) # track the largest error across all states
V[s] = best_act_value
if delta < theta:
break
policy = np.zeros(100)
for s in range(1, 100):
A = one_step_lookahead(s, V, rewards)
best_act = np.argmax(A)
policy[s] = best_act
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State Value
# Implement!
x = range(100) # states 0-99; state 0 is unused by the policy
y = v[:-1] # drop terminal state 100, keep state 0
plt.figure()
plt.plot(x, y)
plt.xlabel('State')
plt.ylabel('Value')
plt.title('State vs Value')
plt.show()
# Plotting Capital vs Final Policy
# Implement!
x = range(100)
y = policy
plt.figure()
plt.bar(x, y, align='center', alpha=0.5)
plt.xlabel('Capital')
plt.ylabel('Final Policy')
plt.title('Capital vs Final Policy')
plt.show()
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
# Implement!
A = np.zeros(51)
for a in range(1, min(s,100-s)+1): # stake 0 is a self-loop and is excluded
A[a] += p_h*(rewards[s+a]+discount_factor*V[s+a]) + (1-p_h)*(rewards[s-a]+discount_factor*V[s-a])
return A
# Implement!
rewards = np.zeros(101)
rewards[100] = 1
V = np.zeros(101)
while True:
delta = 0
for s in range(1,100):
v = V[s]
A = one_step_lookahead(s,V,rewards)
V[s] = np.max(A)
delta = max(delta,abs(v-V[s]))
if delta<theta:
break
policy = np.zeros(100)
for s in range(1, 100):
# One step lookahead to find the best action for this state
A = one_step_lookahead(s, V, rewards)
best_action = np.argmax(A)
# Always take the best action
policy[s] = best_action
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
# Implement!
# x axis values
x = range(100)
# corresponding y axis values
y = v[:100]
# plotting the points
plt.plot(x, y)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Value Estimates')
# giving a title to the graph
plt.title('Final Policy (action stake) vs State (Capital)')
# function to show the plot
plt.show()
# Plotting Capital vs Final Policy
# Implement!
# x axis values
x = range(100)
# corresponding y axis values
y = policy
# plotting the bars
plt.bar(x, y, align='center', alpha=0.5)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Final policy (stake)')
# giving a title to the graph
plt.title('Capital vs Final Policy')
# function to show the plot
plt.show()
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
rewards = np.zeros(101)
rewards[100] = 1
V = np.zeros(101)
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
num_actions = min(s,100-s)
A = np.zeros(num_actions + 1)
for a in range(1, num_actions + 1): # stake 0 is excluded
A[a] = p_h* (rewards[s + a] + discount_factor * V[s + a]) + (1-p_h) * (rewards[s-a] + V[s-a]*discount_factor)
return A
while True:
delta = 0
for s in range(1,100): # sweep non-terminal states only
A = one_step_lookahead(s, V, rewards)
best_v = np.max(A)
delta = max(abs(best_v - V[s]), delta) # compare before overwriting V[s]
V[s] = best_v
if delta < theta:
break
policy = np.zeros(101)
for s in range(1,101):
lookUp_policy = one_step_lookahead(s, V , rewards)
best_action = np.argmax(lookUp_policy)
policy[s] = best_action
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy[0:100])
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
# Implement!
# Plotting Capital vs Final Policy
# Implement!
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
V = np.zeros(101)
rewards = np.zeros(101)
rewards[100] = 1
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
A = np.zeros(min(s, 100-s)+1)
for a in range(1,len(A)):
A[a] = (p_h * (rewards[s+a] + discount_factor * V[s+a])) + ((1-p_h) * (rewards[s-a] + discount_factor * V[s-a]))
return A
# Implement!
while True:
delta = 0
for s in range(1, 101):
v = V[s]
V[s] = np.max(one_step_lookahead(s, V, rewards))
delta = max(delta, abs(v - V[s]))
if delta < theta:
break
policy = np.zeros(100)
for s in range(len(policy)):
policy[s] = np.argmax(one_step_lookahead(s, V, rewards))
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
plt.plot(range(101), v)
plt.show()
# Implement!
# Plotting Capital vs Final Policy
plt.bar(range(100), policy)
plt.show()
# Implement!
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
nA=100
nS=101
rewards=np.zeros(nS)
rewards[100]=1.0
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
A=np.zeros(nA)
# Implement!
for a in range(1,min(s,100-s)+1):
A[a]=p_h*(rewards[s+a]+discount_factor*V[s+a])+(1-p_h)*(V[s-a]*discount_factor+rewards[s-a])
return A
# Implement!
V=np.zeros(nS)
policy=np.zeros(nS)
while True:
delta=0.0
for s in range(nS):
A=one_step_lookahead(s,V,rewards)
delta=max(delta,np.abs(V[s]-max(A)))
V[s]=max(A)
if delta<theta:
break
for s in range(nS):
policy[s]=np.argmax(one_step_lookahead(s, V, rewards))
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
# Implement!
# x axis values
x = range(100)
# corresponding y axis values
y = v[:100]
# plotting the points
plt.plot(x, y)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Value Estimates')
# giving a title to the graph
plt.title('Final Policy (action stake) vs State (Capital)')
# function to show the plot
plt.show()
# Plotting Capital vs Final Policy
x = range(101)
# corresponding y axis values
y = policy
# plotting the bars
plt.bar(x, y, align='center', alpha=0.5)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Final policy (stake)')
# giving a title to the graph
plt.title('Capital vs Final Policy')
# function to show the plot
plt.show()
# Implement!
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
rewards = np.zeros(101)
rewards[100] = 1
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
# Implement!
action_values = np.zeros(101)
stakes = range(1, min(s, 100-s)+1)
for a in stakes:
action_values[a] = p_h * (rewards[s+a] + V[s+a]*discount_factor) + (1-p_h) * (rewards[s-a] + V[s-a]*discount_factor)
return action_values
V = np.zeros(101)
# Implement!
while True:
contractionMappingChange = 0
for s in range(1, 100):
v = V[s]
new_v = np.max(one_step_lookahead(s, V, rewards))
contractionMappingChange = max(contractionMappingChange, np.abs(v-new_v))
V[s] = new_v
if contractionMappingChange < theta:
break
policy = np.zeros(100)
for s in range(1, 100):
action_values = one_step_lookahead(s, V, rewards)
best_a = np.argmax(action_values)
policy[s] = best_a
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting State (Capital) vs Value Function
# Implement!
S = np.arange(1, 100)
plt.plot(S, v[S])
plt.title('State (Capital) vs Value Function')
plt.xlabel('Capital')
plt.ylabel('Value Estimate')
# Plotting Capital vs Policy (Stake)
# Implement!
plt.bar(S, policy[S])
plt.title('Capital vs Optimal Stake')
plt.xlabel('Capital')
plt.ylabel('Optimal Stake')
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
stakes = range(1, min(s, 100-s)+1)
A = np.zeros(101)
for a in stakes:
A[a] = p_h * (rewards[s+a] + V[s+a]*discount_factor) + (1-p_h) * (rewards[s-a] + V[s-a]*discount_factor)
return A
nS = 100
V = np.zeros(nS+1)
rewards = np.zeros(nS+1)
rewards[100] = 1
while True:
delta = 0
for s in range(1, nS):
A = one_step_lookahead(s, V, rewards)
v = max(A)
delta = max(delta, np.abs(v - V[s]))
V[s] = v
if delta < theta:
break
policy = np.zeros(100)
for s in range(1, 100):
A = one_step_lookahead(s, V, rewards)
best_action = np.argmax(A)
policy[s] = best_action
return policy, V
policy, v = value_iteration_for_gamblers(0.4)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
x = range(100)
y = v[:100]
plt.plot(x, y)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Value Estimates')
# giving a title to the graph
plt.title('Final Policy (action stake) vs State (Capital)')
# function to show the plot
plt.show()
x = range(100)
y = policy
plt.bar(x, y, align='center', alpha=0.5)
# naming the x axis
plt.xlabel('Capital')
# naming the y axis
plt.ylabel('Final policy (stake)')
# giving a title to the graph
plt.title('Capital vs Final Policy')
plt.show()
policy
###Output
_____no_output_____
###Markdown
This is Example 4.3. Gambler’s Problem from Sutton's book.A gambler has the opportunity to make bets on the outcomes of a sequence of coin flips. If the coin comes up heads, he wins as many dollars as he has staked on that flip; if it is tails, he loses his stake. The game ends when the gambler wins by reaching his goal of $100, or loses by running out of money. On each flip, the gambler must decide what portion of his capital to stake, in integer numbers of dollars. This problem can be formulated as an undiscounted, episodic, finite MDP. The state is the gambler’s capital, s ∈ {1, 2, . . . , 99}.The actions are stakes, a ∈ {0, 1, . . . , min(s, 100 − s)}. The reward is zero on all transitions except those on which the gambler reaches his goal, when it is +1.The state-value function then gives the probability of winning from each state. A policy is a mapping from levels of capital to stakes. The optimal policy maximizes the probability of reaching the goal. Let p_h denote the probability of the coin coming up heads. If p_h is known, then the entire problem is known and it can be solved, for instance, by value iteration.
###Code
import numpy as np
import sys
import matplotlib.pyplot as plt
if "../" not in sys.path:
sys.path.append("../")
###Output
_____no_output_____
###Markdown
Exercise 4.9 (programming)Implement value iteration for the gambler’s problem and solve it for p_h = 0.25 and p_h = 0.55.
###Code
def value_iteration_for_gamblers(p_h, theta=0.0001, discount_factor=1.0):
"""
Args:
p_h: Probability of the coin coming up heads
"""
rewards = np.zeros(101)
rewards[100] = 1.0
def one_step_lookahead(s, V, rewards):
"""
Helper function to calculate the value for all action in a given state.
Args:
s: The gambler’s capital. Integer.
V: The vector that contains values at each state.
rewards: The reward vector.
Returns:
A vector containing the expected value of each action.
Its length equals to the number of actions.
"""
A = np.zeros(101)
stakes = range(1, min(s, 100-s)+1)
for a in stakes:
A[a] = p_h*(rewards[s+a] + discount_factor*V[s+a]) + (1-p_h)*(rewards[s-a] + discount_factor*V[s-a])
return A
V = np.zeros(101)
while True:
delta = 0
for s in range(1, 100):
A = one_step_lookahead(s, V, rewards)
best_v = np.max(A)
delta = max(delta, np.abs(best_v-V[s]))
V[s] = best_v
if delta < theta:
break
policy = np.zeros(100)
for s in range(1, 100):
A = one_step_lookahead(s, V, rewards)
best_action = np.argmax(A)
policy[s] = best_action
return policy, V
policy, v = value_iteration_for_gamblers(0.25)
print("Optimized Policy:")
print(policy)
print("")
print("Optimized Value Function:")
print(v)
print("")
# Plotting Final Policy (action stake) vs State (Capital)
x = range(100) # Capital
y = v[:100] # value estimates
plt.plot(x, y)
plt.xlabel("Capital")
plt.ylabel("Value Estimates")
plt.title('Final Policy (action stake) vs State (Capital)')
plt.show()
# Plotting Capital vs Final Policy
x = range(100)
y = policy
plt.bar(x, y)
plt.show()
###Output
_____no_output_____ |
QSVM_Qiskit.ipynb | ###Markdown
Quantum SVM

In this project, we implement a simple Support Vector Machine algorithm using Qiskit's QSVM module.

Introduction

Support Vector Machines are one of the first traditional supervised learning algorithms used for classification tasks. To be more specific, they are used for fitting hyperplanes in order to separate or segregate different clusters of datapoint clouds from a distribution, in order to classify them. However, it is not always possible to fit a hyperplane in the raw data distribution which can efficiently segregate the clusters. Hence we apply non-linear mappings to transform the raw datapoints into a distribution which facilitates the hyperplane fitting much more easily and efficiently. This mapped space is also known as the kernel, and we can think of plotting the kernel as a visual representation of distances between pairs of datapoints in the mapped space.

Approach

We have used the inbuilt QSVM module offered by Qiskit Aqua to simulate the model. Even though we have not implemented the model from scratch, upon reading, we understood that the baseline principles for the classical and quantum variants of the SVM are very similar. This project is our first attempt at Quantum Machine Learning and it uses very basic datasets to train a QSVM and check the final testing accuracy.

Group Members:
- Khurshed Fitter
- Luqman Farooqui
- Abhiprada

Importing Required Modules
###Code
import matplotlib.pyplot as plt
import numpy as np
from qiskit import Aer
from qiskit.ml.datasets import ad_hoc_data, sample_ad_hoc_data, breast_cancer
from qiskit.circuit.library import ZZFeatureMap
from qiskit.aqua.utils import split_dataset_to_data_and_labels, map_label_to_class_name
from qiskit.aqua import QuantumInstance
from qiskit.aqua.algorithms import QSVM
###Output
_____no_output_____
###Markdown
Loading and Visualising Data
###Code
feature_dim = 2 # Number of input features
sample_total, train_data, test_data, labels = ad_hoc_data(training_size=20, test_size=10,
n=feature_dim, gap=0.3, plot_data=True)
###Output
_____no_output_____
###Markdown
Loading Sample Data

We need to do this in order to understand how many classes there are in the dataset. Further, it also helps us understand the labels for each of the classes in the dataset.
###Code
sample_test_data = sample_ad_hoc_data(sample_total, 10, n=feature_dim)
data_pts, class2label = split_dataset_to_data_and_labels(sample_test_data)
print("Classes and corresponding labels are:")
for c in class2label:
print(f"Class: {c}, Label: {class2label[c]}")
###Output
Classes and corresponding labels are:
Class: A, Label: 0
Class: B, Label: 1
###Markdown
Creating Feature Map and Model
###Code
seed = 10598 # Setting seed to ensure reproducible results
feature_map = ZZFeatureMap(feature_dimension=feature_dim, reps=2, entanglement='linear')
qsvm = QSVM(feature_map, train_data, test_data, data_pts[0])
###Output
_____no_output_____
###Markdown
Initializing Backend and Invoking Model
###Code
backend = Aer.get_backend('qasm_simulator')
quantum_instance = QuantumInstance(backend, shots=1024, seed_simulator=seed, seed_transpiler=seed)
###Output
_____no_output_____
###Markdown
Collecting and Displaying Results
###Code
result = qsvm.run(quantum_instance)
print(f"Testing Accuracy: {result['testing_accuracy'] * 100}%")
print("Prediction on Datapoints:")
print(f"Ground Truth: {map_label_to_class_name(data_pts[1], qsvm.label_to_class)}")
print(f"Predictions: {result['predicted_classes']}")
###Output
Testing Accuracy: 100.0%
Prediction on Datapoints:
Ground Truth: ['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B']
Predictions: ['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B']
###Markdown
Displaying Feature Mapping Kernel

The collection of inner products between mappings of data points onto the mapping space is called the kernel.
###Code
print("Trained Kernel Matrix:")
kernel_matrix = result['kernel_matrix_training']
img = plt.imshow(np.asmatrix(kernel_matrix),interpolation='nearest',origin='upper',cmap='bone_r')
plt.show()
###Output
Trained Kernel Matrix:
###Markdown
Additional: Breast Cancer DatasetOne can try the same procedure (as described above) on the Breast Cancer dataset.
###Code
feature_dim = 2 # Number of input features
sample_total_bc, train_data_bc, test_data_bc, labels_bc = breast_cancer(training_size=20, test_size=10,
n=feature_dim, plot_data=True)
###Output
_____no_output_____
###Markdown
Since the number of features in the actual dataset is more than 2, we plotted a PCA (Principal Component Analysis) projection of the datapoint scatter.
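A hypothetical sketch of that reduction step (an assumption for intuition; the breast_cancer() loader performs its own PCA internally):
###Code
# Illustrative only: project the 30 raw features onto 2 principal components.
from sklearn.datasets import load_breast_cancer
from sklearn.decomposition import PCA
raw = load_breast_cancer()
reduced = PCA(n_components=2).fit_transform(raw.data)
plt.scatter(reduced[:, 0], reduced[:, 1], c=raw.target, s=5)
plt.show()
###Output
_____no_output_____
###Markdown
Continuing with the QSVM on this dataset: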
###Code
seed = 10598 # Setting seed to ensure reproducible results
feature_map_bc = ZZFeatureMap(feature_dimension=feature_dim, reps=2, entanglement='linear')
qsvm_bc = QSVM(feature_map_bc, train_data_bc, test_data_bc)
backend = Aer.get_backend('qasm_simulator')
quantum_instance_bc = QuantumInstance(backend, shots=1024, seed_simulator=seed, seed_transpiler=seed)
result_bc = qsvm_bc.run(quantum_instance_bc)
print(f"Testing Accuracy: {result_bc['testing_accuracy'] * 100}%")
print("Trained Kernel Matrix:")
kernel_matrix_bc = result_bc['kernel_matrix_training']
img = plt.imshow(np.asmatrix(kernel_matrix_bc),interpolation='nearest',origin='upper',cmap='bone_r')
plt.show()
###Output
Trained Kernel Matrix:
|
python assignment.ipynb | ###Markdown
Question 6 Write a function pig() that takes a word (i.e., a string) as input and returns its pig-Latin form. Your function should still work if the input word contains upper-case characters. Your output should always be lower case, however.
###Code
vowel_letter=('a','e','i','o','u')
def Pig_Latin(word):
if(word[0].lower() in vowel_letter): #checking for vowels in first letter
print(word.lower()+'way')
else:
print(word[1:].lower()+word[0].lower()+'ay')
print("The output is:")
Pig_Latin('happy')
Pig_Latin('Enter')
print("==========================================================")
###Output
The output is:
appyhay
enterway
==========================================================
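###Markdown
The exercise asks pig() to return the pig-Latin form rather than print it. A minimal returning variant (a sketch reusing the vowel_letter tuple defined above):
###Code
def pig(word):
    # Return (rather than print) the pig-Latin form, always in lower case.
    word = word.lower()
    if word[0] in vowel_letter:
        return word + 'way'
    return word[1:] + word[0] + 'ay'
print(pig('happy'))  # -> appyhay
print(pig('Enter'))  # -> enterway
###Output
_____no_output_____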
###Markdown
Question 7 File bloodtype1.txt records blood types of patients (A, B, AB, O or OO) at a clinic. Write a function bldcount() that reads the file with the given name and reports (i.e., prints) how many patients there are of each blood type.
###Code
def bloodcount():
infile = open('Downloads/bloodtype1.txt','r')
info = infile.read()
result = info.split()
infile.close()
# Report the patient count for each blood type found in the file.
for btype in ("A", "B", "AB", "O", "OO"):
report_bloodtype(btype, result.count(btype))
def report_bloodtype(btype, count):
# Print a grammatically correct summary line for one blood type.
if count == 0:
print('There are no patients of blood type ' + btype + '.')
elif count == 1:
print('There is one patient of blood type ' + btype + '.')
else:
print('There are ' + str(count) + ' patients of blood type ' + btype + '.')
print("The Output is:")
bloodcount()
print("==========================================================")
###Output
There are 15 patients of blood type A.
There is one patient of blood type B.
There are 13 patients of blood type AB.
There are 15 patients of blood type O.
There are no patients of blood type OO.
###Markdown
Question 8 Write a function curconv() that takes as input: 1. a currency represented using a string (e.g., 'JPY' for the Japanese Yen or 'EUR' for the Euro) 2. an amount, and then converts and returns the amount in US dollars.
###Code
currencyEUR= input('Enter the currency :') ## input for the Euro currency
currencyJPY= input('Enter the currency :') ## input for Japanese currency
def curconv(curr,conv): # takes a currency code and an amount, converts the amount to USD
infile=open('Downloads\currencies.txt')
info= infile.read()
result=info.split() # split the file contents on whitespace
#print(info)
#print(result)
infile.close()
if(curr in result):
pos = result.index(curr) + 1 # finding the next position(index) of the required currency
conv = float(result[pos]) * conv # Accessing that value at particular index and converting to USD
print('Your converted currency in US dollars from ' + curr + ' : ' + str(conv))
print("The output is:")
curconv(currencyEUR,100)
curconv(currencyJPY,100)
print("==========================================================")
###Output
The output is:
Your converted currency in US dollars from EUR : 122.96544
Your converted currency in US dollars from JPY : 1.241401
==========================================================
###Markdown
Question 9 Each of the following will cause an exception (an error). Identify what type of exception each will cause.
###Code
a = 6 + 'a'                        # TypeError: cannot add an int and a str
print(a)
a = [1,2,3,4,5,6,7,8,9,10]
print(a[12])                       # IndexError: list index out of range
import math
print(math.sqrt(-1))               # ValueError: math domain error
print(x)                           # NameError: name 'x' is not defined
infile=open('S:/currencies.txt')   # FileNotFoundError: the file does not exist
info= infile.read()
###Output
_____no_output_____
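###Markdown
As a check, each statement can be wrapped in try/except and the exception class printed; the expected answers are TypeError, IndexError, ValueError, NameError and FileNotFoundError (a small verification sketch, assuming `x` is undefined):
###Code
import math
cases = [
    lambda: 6 + 'a', # TypeError: unsupported operand types
    lambda: [1,2,3,4,5,6,7,8,9,10][12], # IndexError: index out of range
    lambda: math.sqrt(-1), # ValueError: math domain error
    lambda: x, # NameError (assuming x is undefined)
    lambda: open('S:/currencies.txt'), # FileNotFoundError (a subclass of OSError)
]
for case in cases:
    try:
        case()
    except Exception as e:
        print(type(e).__name__)
###Output
_____no_output_____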
###Markdown
Question 10 Write a function called frequencies() that takes a string as its only parameter, and returns a list of integers, showing the number of times each character appears in the text. Your function may ignore any characters that are not in letters.
###Code
def length(word):
lst=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
inputWord= word
    Length_lst=len(lst) # length of the alphabet list
    for i in range(0,Length_lst): # loop over every letter of the alphabet
        data= inputWord.count(lst[i]) # count how many times this letter occurs in the input
lst1 = str(data)
print(lst1,end= "," )
print("The Output is:-")
length('The quick red fox got bored and went home.')
print("\n")
length('apple')
print("\n")
print("==========================================================")
###Output
The Output is:-
1,1,1,3,5,1,1,2,1,0,1,0,1,2,4,0,1,2,0,2,1,0,1,1,0,0,
1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,
==========================================================
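###Markdown
As specified, `frequencies()` should return the list of 26 counts rather than print them. A minimal sketch (here the input is lower-cased first, so uppercase letters are counted too):
###Code
import string
def frequencies(text):
    # One count per letter a-z; anything that is not a letter is ignored
    text = text.lower()
    return [text.count(ch) for ch in string.ascii_lowercase]
print(frequencies('The quick red fox got bored and went home.'))
print(frequencies('apple'))
###Output
_____no_output_____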
|
code/8.04_intro_to_APIs.ipynb | ###Markdown
8.04 - Intro to APIs
###Code
from IPython.display import display, Image
display(Image(filename='gnod_1st_iteration.jpg'))
display(Image(filename='gnod_2nd_iteration.jpg'))
a = "hello world"
len(a)
dir(a)
a
a + "dsafd"
a.__add__("dsafd")
display(Image(filename='restaurant.jpg'))
###Output
_____no_output_____
###Markdown
REST APIs REST APIs* (**RE**presentational **S**tate **T**ransfer)* The most popular way (paradigm) in which web services are structured nowadays.* REST defines 6 architectural constraints; meeting them makes a web service a RESTful API.* You can find them [here](https://www.geeksforgeeks.org/rest-api-architectural-constraints/:~:text=The%20only%20optional%20constraint%20of,API%20and%20Non-REST%20API.&text=For%20example%3A%20API%2Fusers.). (These concepts can be explained to more advanced classes, added as **optional** content.)* REST aims for a unified standard of interfaces.* This is important because it builds on **HTTP** (hypertext transfer protocol), which defines how clients and servers make **requests** to each other.* HTTP is the tool we will be using when interacting with an API.* Like a Python object, HTTP has different methods, including the most important ones, **GET** and **POST**. We will take a look at them in the next lesson.
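###Markdown
As a quick preview of **POST** (covered properly in the next lesson), a minimal sketch against httpbin.org, a public echo service used here purely for illustration:
###Code
import requests
# httpbin echoes the submitted form data back under the "form" key
payload = {"name": "ada", "course": "apis"}
response = requests.post("https://httpbin.org/post", data=payload)
print(response.status_code) # 200 on success
print(response.json()["form"])
###Output
_____no_output_____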
###Code
# Import requests library
import requests
google = requests.get("https://developers.google.com")
print("Google:", google.status_code)
NBA = requests.get("https://api.sportsdata.io/api/nba/fantasy/json/CurrentSeason")
print("NBA:", NBA.status_code)
rotten_tomato = requests.get("http://api.rottentomatoes.com/api/public/v1.0/lists/movies/box_office.json")
print("Rotten Tomatoes:", rotten_tomato.status_code)
###Output
Google: 200
NBA: 401
Rotten Tomatoes: 403
###Markdown
* `200`: Everything went ok* `301`: The server is redirecting you to a different endpoint* `400`: The server thinks you made a bad request; you're not sending the right data for the request* `401`: You're not properly authenticated* `403`: The resource you're trying to access is forbidden* `404`: The resource does not exist* `503`: The server can't handle the request Use the Skyscanner API from RapidAPI: [link](https://rapidapi.com/skyscanner/api/skyscanner-flight-search) to the API
###Code
import getpass
# your rapid-api key here
api_key = getpass.getpass()
import requests
# send a GET request for locations
url = "https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/autosuggest/v1.0/UK/GBP/en-GB/"
querystring = {"query":"Stockholm"}
headers = {
    'x-rapidapi-key': str(api_key),
'x-rapidapi-host': "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text)
# send a GET request for location "Tokyo"
url = "https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/autosuggest/v1.0/UK/GBP/en-GB/"
params = {"query":"Tokyo"}
headers = {'x-rapidapi-host': "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com",
'x-rapidapi-key': str(api_key)}
response = requests.get(url, headers = headers, params = params)
response.json()
# send a request for flight quotes from San Francisco to New York
url = "https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/browsequotes/v1.0/US/USD/en-US/SFO-sky/NYCA-sky/anytime"
params = {"inboundpartialdate":"2021-12-12"}
headers = {
'x-rapidapi-host': "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com",
'x-rapidapi-key': str(api_key)}
response = requests.get(url, headers=headers, params=params)
response.json()
# Import libraries
import pandas as pd
import json
# Normalize the response
pd.json_normalize(response.json())
quotes = pd.DataFrame(pd.json_normalize(response.json())['Quotes'][0])
carriers = pd.DataFrame(pd.json_normalize(response.json())['Carriers'][0])
places = pd.DataFrame(pd.json_normalize(response.json())['Places'][0])
currencies = pd.DataFrame(pd.json_normalize(response.json())['Currencies'][0])
quotes.head()
carriers.head()
places.head()
currencies
###Output
_____no_output_____ |
examples/notebooks/unsteady_heat_equation.ipynb | ###Markdown
Solving the heat equation in PyBaMM In this notebook we create and solve a model for unsteady heat diffusion in 1D, with a spatially dependent heat source term. The notebook is adapted from example 4.1.2 on pg. 16 of the online notes found [here](https://faculty.uca.edu/darrigo/Students/M4315/Fall%202005/sep-var.pdf). We consider the heat equation $$T_{t} = kT_{xx} + Q(x), \quad 0 < x < L, \quad t > 0,$$ along with the boundary and initial conditions, $$T(0, t)=0, \quad T(L, t)=0, \quad T(x, 0)=2x-x^2,$$ and heat source term $$Q(x)=1-|x-1|.$$ As in the example, we solve the problem on the domain $0 < x < 2$ (i.e. we take $L=2$). We have extended the example to include a thermal diffusivity $k$, which we take to be equal to 0.75. Building the model As always, we start by importing PyBaMM, along with any other packages we require.
###Code
import pybamm
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
We then load up an instance of the `pybamm.BaseModel` class.
###Code
model = pybamm.BaseModel()
###Output
_____no_output_____
###Markdown
We now define the model variables and parameters. Note that we also need to define the spatial variable $x$ here, so that we can write down the spatially dependent source term $Q(x)$. Since we are solving in 1D we have decided to call the domain "rod", but we could name it anything we like. Note that in PyBaMM variables and parameters can be given useful and meaningful names, such as "Temperature", so that they can be easily referred to later.
###Code
x = pybamm.SpatialVariable("x", domain="rod", coord_sys="cartesian")
T = pybamm.Variable("Temperature", domain="rod")
k = pybamm.Parameter("Thermal diffusivity")
###Output
_____no_output_____
###Markdown
Now that we have defined the variables, we can write down the model equations and add them to the `model.rhs` dictionary. This dictionary stores the right hand sides of any time-dependent differential equations (ordinary or partial). The key is the variable inside the time derivative (in this case $T$). To define the heat source term we use a `pybamm.Function` class. The first argument of the class is the function, and the second argument is the input.
###Code
N = -k * pybamm.grad(T) # Heat flux
Q = 1 - pybamm.Function(np.abs, x - 1) # Source term
dTdt = -pybamm.div(N) + Q # The right hand side of the PDE
model.rhs = {T: dTdt} # Add to model
###Output
_____no_output_____
###Markdown
We now add the boundary conditions into the `model.boundary_conditions` dictionary. The keys of the dictionary indicate which end of the boundary the condition is applied to (in 1D this can be "left" or "right"); the entry is then given as a tuple of the value and type. In this example we have homogeneous Dirichlet boundary conditions at both ends.
###Code
model.boundary_conditions = {
T: {
"left": (pybamm.Scalar(0), "Dirichlet"),
"right": (pybamm.Scalar(0), "Dirichlet"),
}
}
###Output
_____no_output_____
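###Markdown
For comparison, an insulated end (zero heat flux) would use the "Neumann" condition type instead; a sketch only, not used in this model:
###Code
# Zero-flux condition on the left, fixed temperature on the right (illustration only)
insulated_bcs = {
    T: {
        "left": (pybamm.Scalar(0), "Neumann"),
        "right": (pybamm.Scalar(0), "Dirichlet"),
    }
}
###Output
_____no_output_____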
###Markdown
We also need to add the initial conditions to the `model.initial_conditions` dictionary.
###Code
model.initial_conditions = {T: 2 * x - x ** 2}
###Output
_____no_output_____
###Markdown
Finally, we add any output variables to the `model.variables` dictionary. These variables can be easily accessed after the model has been solved. You can add any variables of interest to this dictionary. Here we have added the temperature, heat flux and heat source.
###Code
model.variables = {"Temperature": T, "Heat flux": N, "Heat source": Q}
###Output
_____no_output_____
###Markdown
Using the model Now that the model has been constructed we can go ahead and define our geometry and parameter values. We start by defining the geometry for our "rod" domain. We need to define the spatial direction(s) in which spatial operators act (such as gradients). In this case it is simply $x$. We then set the minimum and maximum values $x$ can take. In this example we are solving the problem on the domain $0<x<2$.
###Code
geometry = {"rod": {x: {"min": pybamm.Scalar(0), "max": pybamm.Scalar(2)}}}
###Output
_____no_output_____
###Markdown
We also need to provide the values of any parameters using the `pybamm.ParameterValues` class. This class accepts a dictionary of parameter names and values. Note that the name we provide is the string name of the parameter and not its symbol.
###Code
param = pybamm.ParameterValues({"Thermal diffusivity": 0.75})
###Output
_____no_output_____
###Markdown
Now that we have defined the geometry and provided the parameter values, we can process the model.
###Code
param.process_model(model)
param.process_geometry(geometry)
###Output
_____no_output_____
###Markdown
Before we discretise the spatial operators, we must choose a mesh and a spatial method. Here we choose to use a uniformly spaced 1D mesh with 30 points, and discretise the equations in space using the finite volume method. The information about the mesh is stored in a `pybamm.Mesh` object, whereas the spatial methods are stored in a dictionary which maps domain names to a spatial method. This allows the user to discretise different (sub)domains in a problem using different spatial methods. All of this information goes into a `pybamm.Discretisation` object, which accepts a mesh and a dictionary of spatial methods.
###Code
submesh_types = {"rod": pybamm.Uniform1DSubMesh}
var_pts = {x: 30}
mesh = pybamm.Mesh(geometry, submesh_types, var_pts)
spatial_methods = {"rod": pybamm.FiniteVolume()}
disc = pybamm.Discretisation(mesh, spatial_methods)
###Output
_____no_output_____
###Markdown
The model is then processed using the discretisation, turning the spatial operators into matrix-vector multiplications.
###Code
disc.process_model(model)
###Output
_____no_output_____
###Markdown
Now that the model has been discretised we are ready to solve. We must first choose a solver to use. For this model we choose the Scipy ODE solver, but other solvers are available in PyBaMM (see [here](https://pybamm.readthedocs.io/en/latest/source/solvers/index.html)). To solve the model, we use the method `solver.solve`, which takes in a model and an array of times at which we would like the solution to be returned. The solution is then stored in the `solution` object. The times and states can be accessed with `solution.t` and `solution.y`.
###Code
solver = pybamm.ScipySolver()
t = np.linspace(0, 1, 100)
solution = solver.solve(model, t)
###Output
_____no_output_____
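###Markdown
A quick sketch of inspecting the raw solution arrays (with 30 mesh points and 100 requested times we expect one column of 30 grid values per time):
###Code
print(solution.t.shape) # expected (100,): one entry per requested time
print(solution.y.shape) # expected (30, 100): one row per mesh cell
###Output
_____no_output_____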
###Markdown
After solving, we can process variables using the class `pybamm.ProcessedVariable`. This returns a callable object which can be evaluated at any time and position by means of interpolating the solution. Processed variables provide a convenient way of comparing the solution to solutions from different models, or to exact solutions. Since all of the variables are named with informative strings, the user doesn't need to keep track of where they are stored in the state vector `solution.y`. This is particularly useful in complex models with lots of variables, and is automatically handled by the solution dictionary.
###Code
T_out = solution["Temperature"]
###Output
2020-05-30 11:10:29,856 - [WARNING] processed_variable.get_spatial_scale(497): No scale set for spatial variable x. Using default of 1 [m].
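###Markdown
The processed variable is callable, so the temperature can be interpolated at any time and position, for example:
###Code
# Interpolate the solution at t = 0.5 and a few positions along the rod
print(T_out(0.5, x=np.array([0.5, 1.0, 1.5])))
###Output
_____no_output_____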
###Markdown
Comparison with the exact solution This example admits the exact solution $$T(x,t) = \sum_{n=1}^{\infty} \left(\frac{4}{kn^2\pi^2}q_n + \left( c_n - \frac{4}{kn^2\pi^2}q_n\right) e^{-k\left(\frac{n\pi}{2}\right)^2 t} \right) \sin\left( \frac{n\pi x}{2}\right),$$ with $$c_n = \frac{16}{n^3\pi^3}\left(1 - \cos(n \pi)\right), \quad \text{and} \quad q_n = \frac{8}{n^2\pi^2} \sin\left(\frac{n\pi}{2}\right).$$ We construct the exact solution by summing over some large number $N$ of terms in the Fourier series.
###Code
N = 100 # number of Fourier modes to sum
k_val = param["Thermal diffusivity"] # extract value of diffusivity from the parameters dictionary
# Fourier coefficients
def q(n):
return (8 / (n ** 2 * np.pi ** 2)) * np.sin(n * np.pi / 2)
def c(n):
return (16 / (n ** 3 * np.pi ** 3)) * (1 - np.cos(n * np.pi))
def b(n):
return c(n) - 4 * q(n) / (k_val * n ** 2 * np.pi ** 2)
def T_n(t, n):
return (4 * q(n) / (k_val * n ** 2 * np.pi ** 2)) + b(n) * np.exp(
-k_val * (n * np.pi / 2) ** 2 * t
)
# Sum series to get the temperature
def T_exact(x, t):
out = 0
for n in np.arange(1, N):
out += T_n(t, n) * np.sin(n * np.pi * x / 2)
return out
###Output
_____no_output_____
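###Markdown
Before plotting, the agreement can be quantified by the largest pointwise difference at the final time (a quick check at the cell centres of the numerical grid):
###Code
# Maximum absolute difference between PyBaMM and the truncated series at t = 1
x_check = mesh["rod"].nodes # cell centres of the numerical grid
err = np.max(np.abs(T_out(1.0, x=x_check) - T_exact(x_check, 1.0)))
print("max |T_numerical - T_exact| at t = 1:", err)
###Output
_____no_output_____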
###Markdown
Finally, we plot the numerical and exact solutions at a series of different times. The plot demonstrates an excellent agreement between the numerical solution provided by PyBaMM (dots) and the exact solution (solid lines). Note that in the finite volume method the variable is evaluated at the cell centres.
###Code
x_nodes = mesh["rod"].nodes # numerical gridpoints
xx = np.linspace(0, 2, 101) # fine mesh to plot exact solution
plot_times = np.linspace(0, 1, 5) # times at which to plot
plt.figure(figsize=(15, 8))
cmap = plt.get_cmap("inferno")
for i, t in enumerate(plot_times):
color = cmap(float(i) / len(plot_times))
plt.plot(
x_nodes,
T_out(t, x=x_nodes),
"o",
color=color,
label="Numerical" if i == 0 else "",
)
plt.plot(
xx,
T_exact(xx, t),
"-",
color=color,
label="Exact (t={})".format(plot_times[i]),
)
plt.xlabel("x", fontsize=16)
plt.ylabel("T", fontsize=16)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Solving the heat equation in PyBaMM In this notebook we create and solve a model for unsteady heat diffusion in 1D, with a spatially dependent heat source term. The notebook is adapted from example 4.1.2 on pg. 16 of the online notes found [here](https://faculty.uca.edu/darrigo/Students/M4315/Fall%202005/sep-var.pdf). We consider the heat equation $$T_{t} = kT_{xx} + Q(x), \quad 0 < x < L, \quad t > 0,$$ along with the boundary and initial conditions, $$T(0, t)=0, \quad T(L, t)=0, \quad T(x, 0)=2x-x^2,$$ and heat source term $$Q(x)=1-|x-1|.$$ As in the example, we solve the problem on the domain $0 < x < 2$ (i.e. we take $L=2$). We have extended the example to include a thermal diffusivity $k$, which we take to be equal to 0.75. Building the model As always, we start by importing PyBaMM, along with any other packages we require.
###Code
import pybamm
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
We then load up an instance of the `pybamm.BaseModel` class.
###Code
model = pybamm.BaseModel()
###Output
_____no_output_____
###Markdown
We now define the model variables and parameters. Note that we also need to define the spatial variable $x$ here, so that we can write down the spatially dependent source term $Q(x)$. Since we are solving in 1D we have decided to call the domain "rod", but we could name it anything we like. Note that in PyBaMM variables and parameters can be given useful and meaningful names, such as "Temperature", so that they can be easily referred to later.
###Code
x = pybamm.SpatialVariable("x", domain="rod", coord_sys="cartesian")
T = pybamm.Variable("Temperature", domain="rod")
k = pybamm.Parameter("Thermal diffusivity")
###Output
_____no_output_____
###Markdown
Now that we have defined the variables, we can write down the model equations and add them to the `model.rhs` dictionary. This dictionary stores the right hand sides of any time-dependent differential equations (ordinary or partial). The key is the variable inside the time derivative (in this case $T$). To define the heat source term we use a `pybamm.Function` class. The first argument of the class is the function, and the second argument is the input.
###Code
N = -k * pybamm.grad(T) # Heat flux
Q = 1 - pybamm.Function(np.abs, x - 1) # Source term
dTdt = -pybamm.div(N) + Q # The right hand side of the PDE
model.rhs = {T: dTdt} # Add to model
###Output
_____no_output_____
###Markdown
We now add the boundary conditions into the `model.boundary_conditions` dictionary. The keys of the dictionary indicate which end of the boundary the condition is applied to (in 1D this can be "left" or "right"); the entry is then given as a tuple of the value and type. In this example we have homogeneous Dirichlet boundary conditions at both ends.
###Code
model.boundary_conditions = {
T: {
"left": (pybamm.Scalar(0), "Dirichlet"),
"right": (pybamm.Scalar(0), "Dirichlet"),
}
}
###Output
_____no_output_____
###Markdown
We also need to add the initial conditions to the `model.initial_conditions` dictionary.
###Code
model.initial_conditions = {T: 2 * x - x ** 2}
###Output
_____no_output_____
###Markdown
Finally, we add any output variables to the `model.variables` dictionary. These variables can be easily accessed after the model has been solved. You can add any variables of interest to this dictionary. Here we have added the temperature, heat flux and heat source.
###Code
model.variables = {"Temperature": T, "Heat flux": N, "Heat source": Q}
###Output
_____no_output_____
###Markdown
Using the model Now that the model has been constructed we can go ahead and define our geometry and parameter values. We start by defining the geometry for our "rod" domain. We need to set the so-called "primary" variable, which is the spatial direction(s) in which spatial operators act (such as gradients). In this case it is simply $x$. We then set the minimum and maximum values $x$ can take. In this example we are solving the problem on the domain $0<x<2$.
###Code
geometry = {"rod": {"primary": {x: {"min": pybamm.Scalar(0), "max": pybamm.Scalar(2)}}}}
###Output
_____no_output_____
###Markdown
We also need to provide the values of any parameters using the `pybamm.ParameterValues` class. This class accepts a dictionary of parameter names and values. Note that the name we provide is the string name of the parameter and not its symbol.
###Code
param = pybamm.ParameterValues({"Thermal diffusivity": 0.75})
###Output
_____no_output_____
###Markdown
Now that we have defined the geometry and provided the parameter values, we can process the model.
###Code
param.process_model(model)
param.process_geometry(geometry)
###Output
_____no_output_____
###Markdown
Before we discretise the spatial operators, we must choose a mesh and a spatial method. Here we choose to use a uniformly spaced 1D mesh with 30 points, and discretise the equations in space using the finite volume method. The information about the mesh is stored in a `pybamm.Mesh` object, whereas the spatial methods are stored in a dictionary which maps domain names to a spatial method. This allows the user to discretise different (sub)domains in a problem using different spatial methods. All of this information goes into a `pybamm.Discretisation` object, which accepts a mesh and a dictionary of spatial methods.
###Code
submesh_types = {"rod": pybamm.Uniform1DSubMesh}
var_pts = {x: 30}
mesh = pybamm.Mesh(geometry, submesh_types, var_pts)
spatial_methods = {"rod": pybamm.FiniteVolume()}
disc = pybamm.Discretisation(mesh, spatial_methods)
###Output
_____no_output_____
###Markdown
The model is then processed using the discretisation, turning the spatial operators into matrix-vector multiplications.
###Code
disc.process_model(model)
###Output
_____no_output_____
###Markdown
Now that the model has been discretised we are ready to solve. We must first choose a solver to use. For this model we choose the Scipy ODE solver, but other solvers are available in PyBaMM (see [here](https://pybamm.readthedocs.io/en/latest/source/solvers/index.html)). To solve the model, we use the method `solver.solve`, which takes in a model and an array of times at which we would like the solution to be returned. The solution is then stored in the `solution` object. The times and states can be accessed with `solution.t` and `solution.y`.
###Code
solver = pybamm.ScipySolver()
t = np.linspace(0, 1, 100)
solution = solver.solve(model, t)
###Output
_____no_output_____
###Markdown
After solving, we can process variables using the class `pybamm.ProcessedVariable`. This returns a callable object which can be evaluated at any time and position by means of interpolating the solution. Processed variables provide a convenient way of comparing the solution to solutions from different models, or to exact solutions. Since all of the variables are named with informative strings, the user doesn't need to keep track of where they are stored in the state vector `solution.y`. This is particularly useful in complex models with lots of variables. Here we create `T_out`, which is the processed temperature. In order to do so, we pass the variable, solution times, solution states, and the mesh to `pybamm.ProcessedVariable`.
###Code
T_out = pybamm.ProcessedVariable(model.variables["Temperature"], solution.t, solution.y, mesh)
###Output
_____no_output_____
###Markdown
Comparison with the exact solution This example admits the exact solution $$T(x,t) = \sum_{n=1}^{\infty} \left(\frac{4}{kn^2\pi^2}q_n + \left( c_n - \frac{4}{kn^2\pi^2}q_n\right) e^{-k\left(\frac{n\pi}{2}\right)^2 t} \right) \sin\left( \frac{n\pi x}{2}\right),$$ with $$c_n = \frac{16}{n^3\pi^3}\left(1 - \cos(n \pi)\right), \quad \text{and} \quad q_n = \frac{8}{n^2\pi^2} \sin\left(\frac{n\pi}{2}\right).$$ We construct the exact solution by summing over some large number $N$ of terms in the Fourier series.
###Code
N = 100 # number of Fourier modes to sum
k_val = param["Thermal diffusivity"] # extract value of diffusivity from the parameters dictionary
# Fourier coefficients
def q(n):
return (8 / (n ** 2 * np.pi ** 2)) * np.sin(n * np.pi / 2)
def c(n):
return (16 / (n ** 3 * np.pi ** 3)) * (1 - np.cos(n * np.pi))
def b(n):
return c(n) - 4 * q(n) / (k_val * n ** 2 * np.pi ** 2)
def T_n(t, n):
return (4 * q(n) / (k_val * n ** 2 * np.pi ** 2)) + b(n) * np.exp(
-k_val * (n * np.pi / 2) ** 2 * t
)
# Sum series to get the temperature
def T_exact(x, t):
out = 0
for n in np.arange(1, N):
out += T_n(t, n) * np.sin(n * np.pi * x / 2)
return out
###Output
_____no_output_____
###Markdown
Finally, we plot the numerical and exact solutions at a series of different times. The plot demonstrates an excellent agreement between the numerical solution provided by PyBaMM (dots) and the exact solution (solid lines). Note that in the finite volume method the variable is evaluated at the cell centres.
###Code
x_nodes = mesh["rod"][0].nodes # numerical gridpoints
xx = np.linspace(0, 2, 101) # fine mesh to plot exact solution
plot_times = np.linspace(0, 1, 5) # times at which to plot
plt.figure(figsize=(15, 8))
cmap = plt.get_cmap("inferno")
for i, t in enumerate(plot_times):
color = cmap(float(i) / len(plot_times))
plt.plot(
x_nodes,
T_out(t, x=x_nodes),
"o",
color=color,
label="Numerical" if i == 0 else "",
)
plt.plot(
xx,
T_exact(xx, t),
"-",
color=color,
label="Exact (t={})".format(plot_times[i]),
)
plt.xlabel("x", fontsize=16)
plt.ylabel("T", fontsize=16)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Solving the heat equation in PyBaMM In this notebook we create and solve a model for unsteady heat diffusion in 1D, with a spatially dependent heat source term. The notebook is adapted from example 4.1.2 on pg. 16 of the online notes found [here](https://faculty.uca.edu/darrigo/Students/M4315/Fall%202005/sep-var.pdf). We consider the heat equation $$T_{t} = kT_{xx} + Q(x), \quad 0 < x < L, \quad t > 0,$$ along with the boundary and initial conditions, $$T(0, t)=0, \quad T(L, t)=0, \quad T(x, 0)=2x-x^2,$$ and heat source term $$Q(x)=1-|x-1|.$$ As in the example, we solve the problem on the domain $0 < x < 2$ (i.e. we take $L=2$). We have extended the example to include a thermal diffusivity $k$, which we take to be equal to 0.75. Building the model As always, we start by importing PyBaMM, along with any other packages we require.
###Code
import pybamm
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
We then load up an instance of the `pybamm.BaseModel` class.
###Code
model = pybamm.BaseModel()
###Output
_____no_output_____
###Markdown
We now define the model variables and parameters. Note that we also need to define the spatial variable $x$ here, so that we can write down the spatially dependent source term $Q(x)$. Since we are solving in 1D we have decided to call the domain "rod", but we could name it anything we like. Note that in PyBaMM variables and parameters can be given useful and meaningful names, such as "Temperature", so that they can be easily referred to later.
###Code
x = pybamm.SpatialVariable("x", domain="rod", coord_sys="cartesian")
T = pybamm.Variable("Temperature", domain="rod")
k = pybamm.Parameter("Thermal diffusivity")
###Output
_____no_output_____
###Markdown
Now that we have defined the variables, we can write down the model equations and add them to the `model.rhs` dictionary. This dictionary stores the right hand sides of any time-dependent differential equations (ordinary or partial). The key is the variable inside the time derivative (in this case $T$). To define the heat source term we use a `pybamm.Function` class. The first argument of the class is the function, and the second argument is the input.
###Code
N = -k * pybamm.grad(T) # Heat flux
Q = 1 - pybamm.Function(np.abs, x - 1) # Source term
dTdt = -pybamm.div(N) + Q # The right hand side of the PDE
model.rhs = {T: dTdt} # Add to model
###Output
_____no_output_____
###Markdown
We now add the boundary conditions into the `model.boundary_conditions` dictionary. The keys of the dictionary indicate which end of the boundary the condition is applied to (in 1D this can be "left" or "right"); the entry is then given as a tuple of the value and type. In this example we have homogeneous Dirichlet boundary conditions at both ends.
###Code
model.boundary_conditions = {
T: {
"left": (pybamm.Scalar(0), "Dirichlet"),
"right": (pybamm.Scalar(0), "Dirichlet"),
}
}
###Output
_____no_output_____
###Markdown
We also need to add the initial conditions to the `model.initial_conditions` dictionary.
###Code
model.initial_conditions = {T: 2 * x - x ** 2}
###Output
_____no_output_____
###Markdown
Finally, we add any output variables to the `model.variables` dictionary. These variables can be easily accessed after the model has been solved. You can add any variables of interest to this dictionary. Here we have added the temperature, heat flux and heat source.
###Code
model.variables = {"Temperature": T, "Heat flux": N, "Heat source": Q}
###Output
_____no_output_____
###Markdown
Using the model Now that the model has been constructed we can go ahead and define our geometry and parameter values. We start by defining the geometry for our "rod" domain. We need to set the so-called "primary" variable, which is the spatial direction(s) in which spatial operators act (such as gradients). In this case it is simply $x$. We then set the minimum and maximum values $x$ can take. In this example we are solving the problem on the domain $0<x<2$.
###Code
geometry = {"rod": {"primary": {x: {"min": pybamm.Scalar(0), "max": pybamm.Scalar(2)}}}}
###Output
_____no_output_____
###Markdown
We also need to provide the values of any parameters using the `pybamm.ParameterValues` class. This class accepts a dictionary of parameter names and values. Note that the name we provide is the string name of the parameter and not its symbol.
###Code
param = pybamm.ParameterValues({"Thermal diffusivity": 0.75})
###Output
_____no_output_____
###Markdown
Now that we have defined the geometry and provided the parameter values, we can process the model.
###Code
param.process_model(model)
param.process_geometry(geometry)
###Output
_____no_output_____
###Markdown
Before we discretise the spatial operators, we must choose a mesh and a spatial method. Here we choose to use a uniformly spaced 1D mesh with 30 points, and discretise the equations in space using the finite volume method. The information about the mesh is stored in a `pybamm.Mesh` object, whereas the spatial methods are stored in a dictionary which maps domain names to a spatial method. This allows the user to discretise different (sub)domains in a problem using different spatial methods. All of this information goes into a `pybamm.Discretisation` object, which accepts a mesh and a dictionary of spatial methods.
###Code
submesh_types = {"rod": pybamm.Uniform1DSubMesh}
var_pts = {x: 30}
mesh = pybamm.Mesh(geometry, submesh_types, var_pts)
spatial_methods = {"rod": pybamm.FiniteVolume()}
disc = pybamm.Discretisation(mesh, spatial_methods)
###Output
_____no_output_____
###Markdown
The model is then processed using the discretisation, turning the spatial operators into matrix-vector multiplications.
###Code
disc.process_model(model)
###Output
_____no_output_____
###Markdown
Now that the model has been discretised we are ready to solve. We must first choose a solver to use. For this model we choose the Scipy ODE solver, but other solvers are available in PyBaMM (see [here](https://pybamm.readthedocs.io/en/latest/source/solvers/index.html)). To solve the model, we use the method `solver.solve`, which takes in a model and an array of times at which we would like the solution to be returned. The solution is then stored in the `solution` object. The times and states can be accessed with `solution.t` and `solution.y`.
###Code
solver = pybamm.ScipySolver()
t = np.linspace(0, 1, 100)
solution = solver.solve(model, t)
###Output
_____no_output_____
###Markdown
After solving, we can process variables using the class `pybamm.ProcessedVariable`. This returns a callable object which can be evaluated at any time and position by means of interpolating the solution. Processed variables provide a convenient way of comparing the solution to solutions from different models, or to exact solutions. Since all of the variables are named with informative strings, the user doesn't need to keep track of where they are stored in the state vector `solution.y`. This is particularly useful in complex models with lots of variables, and is automatically handled by the solution dictionary.
###Code
T_out = solution["Temperature"]
###Output
_____no_output_____
###Markdown
Comparison with the exact solution This example admits the exact solution $$T(x,t) = \sum_{n=1}^{\infty} \left(\frac{4}{kn^2\pi^2}q_n + \left( c_n - \frac{4}{kn^2\pi^2}q_n\right) e^{-k\left(\frac{n\pi}{2}\right)^2 t} \right) \sin\left( \frac{n\pi x}{2}\right),$$ with $$c_n = \frac{16}{n^3\pi^3}\left(1 - \cos(n \pi)\right), \quad \text{and} \quad q_n = \frac{8}{n^2\pi^2} \sin\left(\frac{n\pi}{2}\right).$$ We construct the exact solution by summing over some large number $N$ of terms in the Fourier series.
###Code
N = 100 # number of Fourier modes to sum
k_val = param["Thermal diffusivity"] # extract value of diffusivity from the parameters dictionary
# Fourier coefficients
def q(n):
return (8 / (n ** 2 * np.pi ** 2)) * np.sin(n * np.pi / 2)
def c(n):
return (16 / (n ** 3 * np.pi ** 3)) * (1 - np.cos(n * np.pi))
def b(n):
return c(n) - 4 * q(n) / (k_val * n ** 2 * np.pi ** 2)
def T_n(t, n):
return (4 * q(n) / (k_val * n ** 2 * np.pi ** 2)) + b(n) * np.exp(
-k_val * (n * np.pi / 2) ** 2 * t
)
# Sum series to get the temperature
def T_exact(x, t):
out = 0
for n in np.arange(1, N):
out += T_n(t, n) * np.sin(n * np.pi * x / 2)
return out
###Output
_____no_output_____
###Markdown
Finally, we plot the numerical and exact solutions at a series of different times. The plot demonstrates an excellent agreement between the numerical solution provided by PyBaMM (dots) and the exact solution (solid lines). Note that in the finite volume method the variable is evaluated at the cell centres.
###Code
x_nodes = mesh["rod"][0].nodes # numerical gridpoints
xx = np.linspace(0, 2, 101) # fine mesh to plot exact solution
plot_times = np.linspace(0, 1, 5) # times at which to plot
plt.figure(figsize=(15, 8))
cmap = plt.get_cmap("inferno")
for i, t in enumerate(plot_times):
color = cmap(float(i) / len(plot_times))
plt.plot(
x_nodes,
T_out(t, x=x_nodes),
"o",
color=color,
label="Numerical" if i == 0 else "",
)
plt.plot(
xx,
T_exact(xx, t),
"-",
color=color,
label="Exact (t={})".format(plot_times[i]),
)
plt.xlabel("x", fontsize=16)
plt.ylabel("T", fontsize=16)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Solving the heat equation in PyBaMM In this notebook we create and solve a model for unsteady heat diffusion in 1D, with a spatially dependent heat source term. The notebook is adapted from example 4.1.2 on pg. 16 of the online notes found [here](https://faculty.uca.edu/darrigo/Students/M4315/Fall%202005/sep-var.pdf). We consider the heat equation $$T_{t} = kT_{xx} + Q(x), \quad 0 < x < L, \quad t > 0,$$ along with the boundary and initial conditions, $$T(0, t)=0, \quad T(L, t)=0, \quad T(x, 0)=2x-x^2,$$ and heat source term $$Q(x)=1-|x-1|.$$ As in the example, we solve the problem on the domain $0 < x < 2$ (i.e. we take $L=2$). We have extended the example to include a thermal diffusivity $k$, which we take to be equal to 0.75. Building the model As always, we start by importing PyBaMM, along with any other packages we require.
###Code
%pip install pybamm -q # install PyBaMM if it is not installed
import pybamm
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
We then load up an instance of the `pybamm.BaseModel` class.
###Code
model = pybamm.BaseModel()
###Output
_____no_output_____
###Markdown
We now define the model variables and parameters. Note that we also need to define the spatial variable $x$ here, so that we can write down the spatially dependent source term $Q(x)$. Since we are solving in 1D we have decided to call the domain "rod", but we could name it anything we like. Note that in PyBaMM variables and parameters can be given useful and meaningful names, such as "Temperature", so that they can be easily referred to later.
###Code
x = pybamm.SpatialVariable("x", domain="rod", coord_sys="cartesian")
T = pybamm.Variable("Temperature", domain="rod")
k = pybamm.Parameter("Thermal diffusivity")
###Output
_____no_output_____
###Markdown
Now that we have defined the variables, we can write down the model equations and add them to the `model.rhs` dictionary. This dictionary stores the right hand sides of any time-dependent differential equations (ordinary or partial). The key is the variable inside the time derivative (in this case $T$). To define the heat source term we use a `pybamm.Function` class. The first argument of the class is the function, and the second argument is the input.
###Code
N = -k * pybamm.grad(T) # Heat flux
Q = 1 - pybamm.Function(np.abs, x - 1) # Source term
dTdt = -pybamm.div(N) + Q # The right hand side of the PDE
model.rhs = {T: dTdt} # Add to model
###Output
_____no_output_____
###Markdown
We now add the boundary conditions into the `model.boundary_conditions` dictionary. The keys of the dictionary indicate which end of the boundary the condition is applied to (in 1D this can be "left" or "right"); the entry is then given as a tuple of the value and type. In this example we have homogeneous Dirichlet boundary conditions at both ends.
###Code
model.boundary_conditions = {
T: {
"left": (pybamm.Scalar(0), "Dirichlet"),
"right": (pybamm.Scalar(0), "Dirichlet"),
}
}
###Output
_____no_output_____
###Markdown
We also need to add the initial conditions to the `model.initial_conditions` dictionary.
###Code
model.initial_conditions = {T: 2 * x - x ** 2}
###Output
_____no_output_____
###Markdown
Finally, we add any output variables to the `model.variables` dictionary. These variables can be easily accessed after the model has been solved. You can add any variables of interest to this dictionary. Here we have added the temperature, heat flux and heat source.
###Code
model.variables = {"Temperature": T, "Heat flux": N, "Heat source": Q}
###Output
_____no_output_____
###Markdown
Using the model Now that the model has been constructed we can go ahead and define our geometry and parameter values. We start by defining the geometry for our "rod" domain. We need to define the spatial direction(s) in which spatial operators act (such as gradients). In this case it is simply $x$. We then set the minimum and maximum values $x$ can take. In this example we are solving the problem on the domain $0<x<2$.
###Code
geometry = {"rod": {x: {"min": pybamm.Scalar(0), "max": pybamm.Scalar(2)}}}
###Output
_____no_output_____
###Markdown
We also need to provide the values of any parameters using the `pybamm.ParameterValues` class. This class accepts a dictionary of parameter names and values. Note that the name we provide is the string name of the parameter and not its symbol.
###Code
param = pybamm.ParameterValues({"Thermal diffusivity": 0.75})
###Output
_____no_output_____
###Markdown
Now that we have defined the geometry and provided the parameters values, we can process the model.
###Code
param.process_model(model)
param.process_geometry(geometry)
###Output
_____no_output_____
###Markdown
Before we discretise the spatial operators, we must choose a mesh and a spatial method. Here we choose to use a uniformly spaced 1D mesh with 30 points, and discretise the equations in space using the finite volume method. The information about the mesh is stored in a `pybamm.Mesh` object, whereas the spatial methods are stored in a dictionary which maps domain names to a spatial method. This allows the user to discretise different (sub)domains in a problem using different spatial methods. All of this information goes into a `pybamm.Discretisation` object, which accepts a mesh and a dictionary of spatial methods.
###Code
submesh_types = {"rod": pybamm.Uniform1DSubMesh}
var_pts = {x: 30}
mesh = pybamm.Mesh(geometry, submesh_types, var_pts)
spatial_methods = {"rod": pybamm.FiniteVolume()}
disc = pybamm.Discretisation(mesh, spatial_methods)
###Output
_____no_output_____
###Markdown
The model is then processed using the discretisation, turning the spatial operators into matrix-vector multiplications.
###Code
disc.process_model(model)
###Output
_____no_output_____
###Markdown
Now that the model has been discretised we are ready to solve. We must first choose a solver to use. For this model we choose the SciPy ODE solver, but other solvers are available in PyBaMM (see [here](https://pybamm.readthedocs.io/en/latest/source/solvers/index.html)). To solve the model, we use the method `solver.solve`, which takes in a model and an array of times at which we would like the solution to be returned. The solution is then stored in the `solution` object, and the times and states can be accessed with `solution.t` and `solution.y`.
###Code
solver = pybamm.ScipySolver()
t = np.linspace(0, 1, 100)
solution = solver.solve(model, t)
###Output
_____no_output_____
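###Markdown
As an aside (an illustrative addition, assuming the optional CasADi dependency is installed), a different solver can be swapped in through the same interface:
###Code
# Hypothetical alternative: solve the same discretised model with the CasADi solver
casadi_solver = pybamm.CasadiSolver(mode="fast")
casadi_solution = casadi_solver.solve(model, t)
###Output
_____no_output_____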
###Markdown
After solving, we can process variables using the class `pybamm.ProcessedVariable`. This returns a callable object which can be evaluated at any time and position by interpolating the solution. Processed variables provide a convenient way of comparing the solution to solutions from different models, or to exact solutions. Since all of the variables are named with informative strings, the user doesn't need to keep track of where they are stored in the state vector `solution.y`. This is particularly useful in complex models with lots of variables, and is handled automatically by the solution dictionary.
###Code
T_out = solution["Temperature"]
###Output
2020-05-30 11:10:29,856 - [WARNING] processed_variable.get_spatial_scale(497): No scale set for spatial variable x. Using default of 1 [m].
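###Markdown
Since the processed variable is callable, we can evaluate it directly; for example, the temperature at $t=0.5$ at the midpoint of the rod (values chosen arbitrarily for illustration):
###Code
# Interpolate the solution at a single time and position
T_out(0.5, x=np.array([1.0]))
###Output
_____no_output_____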
###Markdown
Comparison with the exact solution This example admits the exact solution $$T(x,t) = \sum_{n=1}^{\infty} \left(\frac{4}{kn^2\pi^2}q_n + \left( c_n - \frac{4}{kn^2\pi^2}q_n\right) \mathrm{e}^{-k\left(\frac{n\pi}{2}\right)^2t} \right) \sin\left( \frac{n\pi x}{2}\right),$$with $$c_n = \frac{16}{n^3\pi^3}\left(1 - \cos(n \pi)\right), \quad \text{and} \quad q_n = \frac{8}{n^2\pi^2} \sin\left(\frac{n\pi}{2}\right).$$ We construct the exact solution by summing over some large number $N$ of terms in the Fourier series.
###Code
N = 100 # number of Fourier modes to sum
k_val = param["Thermal diffusivity"] # extract value of diffusivity from the parameters dictionary
# Fourier coefficients
def q(n):
return (8 / (n ** 2 * np.pi ** 2)) * np.sin(n * np.pi / 2)
def c(n):
return (16 / (n ** 3 * np.pi ** 3)) * (1 - np.cos(n * np.pi))
def b(n):
return c(n) - 4 * q(n) / (k_val * n ** 2 * np.pi ** 2)
def T_n(t, n):
return (4 * q(n) / (k_val * n ** 2 * np.pi ** 2)) + b(n) * np.exp(
-k_val * (n * np.pi / 2) ** 2 * t
)
# Sum series to get the temperature
def T_exact(x, t):
out = 0
for n in np.arange(1, N):
out += T_n(t, n) * np.sin(n * np.pi * x / 2)
return out
###Output
_____no_output_____
###Markdown
Finally, we plot the numerical and exact solutions at a series of different times. The plot demonstrates an excellent agreement between the numerical solution provided by PyBaMM (dots) and the exact solution (solid lines). Note that in the finite volume method the variable is evaluated at the cell centres.
###Code
x_nodes = mesh["rod"].nodes # numerical gridpoints
xx = np.linspace(0, 2, 101) # fine mesh to plot exact solution
plot_times = np.linspace(0, 1, 5) # times at which to plot
plt.figure(figsize=(15, 8))
cmap = plt.get_cmap("inferno")
for i, t in enumerate(plot_times):
color = cmap(float(i) / len(plot_times))
plt.plot(
x_nodes,
T_out(t, x=x_nodes),
"o",
color=color,
label="Numerical" if i == 0 else "",
)
plt.plot(
xx,
T_exact(xx, t),
"-",
color=color,
label="Exact (t={})".format(plot_times[i]),
)
plt.xlabel("x", fontsize=16)
plt.ylabel("T", fontsize=16)
plt.legend()
plt.show()
###Output
_____no_output_____ |
private_training/JNotebook_running_FSCDP_on_Colab.ipynb | ###Markdown
1. Install Torch and Opacus
###Code
!pip install torchcsprng==0.1.2+cu101 torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
!pip install opacus
###Output
_____no_output_____
###Markdown
2. Connect GDrive to this Notebook
###Code
from google.colab import drive
drive.mount('/content/gdrive/', force_remount= True)
cd gdrive/My\ Drive/Opacus
###Output
/content/gdrive/My Drive/Opacus
###Markdown
3. Import the "src" Folder
###Code
import sys
sys.path.append("src/")
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
4. The Main Function to Run
###Code
import os
import copy
import time
import pickle
import numpy as np
import torch
from torch import nn
from torchsummary import summary
from options import args_parser
from update_s4 import LocalUpdate
from utils import test_inference
from models import CNNMnistRelu, CNNMnistTanh
from models import CNNFashion_MnistRelu, CNNFashion_MnistTanh
from models import CNNCifar10Relu, CNNCifar10Tanh
from utils import average_weights, exp_details
from datasets import get_dataset
from torchvision import models
from logging_results import logging
from opacus.dp_model_inspector import DPModelInspector
from opacus.utils import module_modification
from opacus import PrivacyEngine
def main(args):
############# Common ###################
# args = args_parser()
if args.gpu:
torch.cuda.set_device(args.gpu)
device = 'cuda' if args.gpu else 'cpu'
# load dataset and user groups
train_dataset, test_dataset, user_groups = get_dataset(args)
# BUILD MODEL
if args.model == 'cnn':
    # Convolutional neural network
if args.dataset == 'mnist':
if args.activation == 'relu':
global_model = CNNMnistRelu()
elif args.activation == 'tanh':
global_model = CNNMnistTanh()
global_model.to(device)
summary(global_model, input_size=(1, 28, 28), device=device)
elif args.dataset == 'fmnist':
if args.activation == 'relu':
global_model = CNNFashion_MnistRelu()
elif args.activation == 'tanh':
global_model = CNNFashion_MnistTanh()
global_model.to(device)
summary(global_model, input_size=(1, 28, 28), device=device)
elif args.dataset == 'cifar10':
# global_model = models.resnet18(num_classes=10)
if args.activation == 'relu':
global_model = CNNCifar10Relu()
elif args.activation == 'tanh':
global_model = CNNCifar10Tanh()
global_model.to(device)
summary(global_model, input_size=(3, 32, 32), device=device)
elif args.dataset == 'dr':
global_model = models.squeezenet1_1(pretrained=True)
global_model.classifier[1] = nn.Conv2d(512, 5, kernel_size=(1,1), stride=(1,1))
global_model.num_classes = 5
global_model.to(device)
summary(global_model, input_size=(3, 224, 224), device=device)
else:
exit('Error: unrecognized model')
############# Common ###################
######### DP Model Compatibility #######
if args.withDP:
try:
inspector = DPModelInspector()
inspector.validate(global_model)
print("Model's already Valid!\n")
except:
global_model = module_modification.convert_batchnorm_modules(global_model)
inspector = DPModelInspector()
print(f"Is the model valid? {inspector.validate(global_model)}")
print("Model is convereted to be Valid!\n")
######### DP Model Compatibility #######
######### Local Models and Optimizers #############
local_models = []
local_optimizers = []
local_privacy_engine = []
for u in range(args.num_users):
local_models.append(copy.deepcopy(global_model))
if args.optimizer == 'sgd':
optimizer = torch.optim.SGD(local_models[u].parameters(), lr=args.lr,
momentum=args.momentum)
elif args.optimizer == 'adam':
optimizer = torch.optim.Adam(local_models[u].parameters(), lr=args.lr)
if args.withDP:
            # This part is intentionally configured this way: it keeps the privacy engine from raising an error with vhp.
privacy_engine = PrivacyEngine(
local_models[u],
batch_size = int(len(train_dataset)*args.sampling_prob),
sample_size = len(train_dataset),
alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
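            # Added note: the local noise is scaled down by sqrt(num_users) so that,
            # once the local updates are averaged, the aggregate noise level matches
            # the global noise_multiplier tracked by the global accountant below.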
noise_multiplier = args.noise_multiplier/np.sqrt(args.num_users),
max_grad_norm = args.max_grad_norm,
)
privacy_engine.attach(optimizer)
local_privacy_engine.append(privacy_engine)
local_optimizers.append(optimizer)
if args.optimizer == 'sgd':
g_optimizer = torch.optim.SGD(global_model.parameters(), lr=args.lr,
momentum=args.momentum)
elif args.optimizer == 'adam':
g_optimizer = torch.optim.Adam(global_model.parameters(), lr=args.lr)
if args.withDP:
local_dataset_size = int(len(train_dataset)/args.num_users)
actual_train_ds_size = local_dataset_size*args.num_users
global_privacy_engine = PrivacyEngine(
global_model,
batch_size = int(actual_train_ds_size*args.sampling_prob),
sample_size = actual_train_ds_size,
alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
noise_multiplier = args.noise_multiplier,
max_grad_norm = args.max_grad_norm)
global_privacy_engine.attach(g_optimizer)
######## Local Models and Optimizers #############
# Training
train_loss = []
test_log = []
epsilon_log = []
print("Avg batch_size: ", int(actual_train_ds_size*args.sampling_prob))
for epoch in range(args.epochs):
## Sample the users ##
idxs_users = np.random.choice(range(args.num_users),
max(int(args.frac * args.num_users), 1),
replace=False)
#####
local_weights, local_losses = [], []
for u in idxs_users:
torch.cuda.empty_cache()
local_model = LocalUpdate(args=args, dataset=train_dataset,
u_id=u, idxs=user_groups[u],
sampling_prob=args.sampling_prob,
optimizer = local_optimizers[u])
w, loss, local_optimizers[u] = local_model.update_weights(
model=local_models[u],
global_round=epoch)
local_weights.append(copy.deepcopy(w))
local_losses.append(copy.deepcopy(loss))
# update global weights
global_weights = average_weights(local_weights)
# update global weights
global_model.load_state_dict(global_weights)
for u in range(args.num_users):
local_models[u].load_state_dict(global_weights)
if epoch !=0 and epoch%30==0:
torch.cuda.empty_cache()
loss_avg = sum(local_losses) / len(local_losses)
train_loss.append(loss_avg)
_acc, _loss = test_inference(args, global_model, test_dataset)
test_log.append([_acc, _loss])
if args.withDP:
global_privacy_engine.steps = epoch+1
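            # Added note: the accountant's step counter is set manually so that
            # get_privacy_spent reflects every federated round completed so far.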
epsilons, _ = global_privacy_engine.get_privacy_spent(args.delta)
epsilon_log.append([epsilons])
else:
epsilon_log = None
logging(args, epoch, train_loss, test_log, epsilon_log)
print(global_privacy_engine.steps)
###Output
_____no_output_____
###Markdown
5. The Chosen Settings for the Experiment
###Code
import argparse
parser = argparse.ArgumentParser()
## etc.
parser.add_argument('--sub_dataset_size', type=int, default=-1, help='To reduce original data to a smaller \
sized dataset. For experimental purposes.')
# federated arguments (Notation for the arguments followed from paper)
parser.add_argument('--epochs', type=int, default=3001,
help="number of rounds of training")
parser.add_argument('--num_users', type=int, default=10,
help="number of users: K")
parser.add_argument('--frac', type=float, default=1.,
help='the fraction of clients: C')
parser.add_argument('--local_ep', type=int, default=1,
help="the number of local epochs: E")
parser.add_argument('--local_bs', type=int, default=1,
help="local batch size: B")
parser.add_argument('--virtual_batch_size', type=int, default=1,
help='DP VIRTUAL_BATCH_SIZE')
## Optimizer
parser.add_argument('--optimizer', type=str, default='sgd', help="type \
of optimizer")
parser.add_argument('--lr', type=float, default=.002,
help='learning rate')
parser.add_argument('--momentum', type=float, default=0.9,
                        help='SGD momentum (default: 0.9)')
# model arguments
parser.add_argument('--model', type=str, default='cnn', help='model name')
parser.add_argument('--activation', type=str, default="tanh",
                        help='activation function (default: tanh)')
# other arguments
parser.add_argument('--dataset', type=str, default='dr', help="name \
of dataset")
parser.add_argument('--gpu', default="cuda:0", help="To use cuda, set \
                        to a specific GPU ID (default: cuda:0).")
parser.add_argument('--iid', type=int, default=1,
help='Default set to IID. Set to 0 for non-IID.')
parser.add_argument('--unequal', type=int, default=0,
help='whether to use unequal data splits for \
non-i.i.d setting (use 0 for equal splits)')
parser.add_argument('--local_test_split', type=float, default=0., help='fraction of local data held out for testing')
parser.add_argument('--dr_from_np', type=float, default=1, help='for diabetic_retinopathy dataset')
## DP arguments
parser.add_argument('--withDP', type=int, default=1, help='WithDP')
parser.add_argument('--max_grad_norm', type=float, default= 2., help='DP MAX_GRAD_NORM')
parser.add_argument('--noise_multiplier', type=float, default=1.15, help='DP NOISE_MULTIPLIER')
parser.add_argument('--delta', type=float, default=1e-4, help='DP DELTA')
parser.add_argument('--sampling_prob', type=float, default=0.03425, help='sampling_prob')
parser.add_argument('--exp_name', type=str,
default="test_fscdp", help="The name of current experiment for logging.")
args = parser.parse_args([])
main(args)
###Output
0
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 64, 111, 111] 1,792
ReLU-2 [-1, 64, 111, 111] 0
MaxPool2d-3 [-1, 64, 55, 55] 0
Conv2d-4 [-1, 16, 55, 55] 1,040
ReLU-5 [-1, 16, 55, 55] 0
Conv2d-6 [-1, 64, 55, 55] 1,088
ReLU-7 [-1, 64, 55, 55] 0
Conv2d-8 [-1, 64, 55, 55] 9,280
ReLU-9 [-1, 64, 55, 55] 0
Fire-10 [-1, 128, 55, 55] 0
Conv2d-11 [-1, 16, 55, 55] 2,064
ReLU-12 [-1, 16, 55, 55] 0
Conv2d-13 [-1, 64, 55, 55] 1,088
ReLU-14 [-1, 64, 55, 55] 0
Conv2d-15 [-1, 64, 55, 55] 9,280
ReLU-16 [-1, 64, 55, 55] 0
Fire-17 [-1, 128, 55, 55] 0
MaxPool2d-18 [-1, 128, 27, 27] 0
Conv2d-19 [-1, 32, 27, 27] 4,128
ReLU-20 [-1, 32, 27, 27] 0
Conv2d-21 [-1, 128, 27, 27] 4,224
ReLU-22 [-1, 128, 27, 27] 0
Conv2d-23 [-1, 128, 27, 27] 36,992
ReLU-24 [-1, 128, 27, 27] 0
Fire-25 [-1, 256, 27, 27] 0
Conv2d-26 [-1, 32, 27, 27] 8,224
ReLU-27 [-1, 32, 27, 27] 0
Conv2d-28 [-1, 128, 27, 27] 4,224
ReLU-29 [-1, 128, 27, 27] 0
Conv2d-30 [-1, 128, 27, 27] 36,992
ReLU-31 [-1, 128, 27, 27] 0
Fire-32 [-1, 256, 27, 27] 0
MaxPool2d-33 [-1, 256, 13, 13] 0
Conv2d-34 [-1, 48, 13, 13] 12,336
ReLU-35 [-1, 48, 13, 13] 0
Conv2d-36 [-1, 192, 13, 13] 9,408
ReLU-37 [-1, 192, 13, 13] 0
Conv2d-38 [-1, 192, 13, 13] 83,136
ReLU-39 [-1, 192, 13, 13] 0
Fire-40 [-1, 384, 13, 13] 0
Conv2d-41 [-1, 48, 13, 13] 18,480
ReLU-42 [-1, 48, 13, 13] 0
Conv2d-43 [-1, 192, 13, 13] 9,408
ReLU-44 [-1, 192, 13, 13] 0
Conv2d-45 [-1, 192, 13, 13] 83,136
ReLU-46 [-1, 192, 13, 13] 0
Fire-47 [-1, 384, 13, 13] 0
Conv2d-48 [-1, 64, 13, 13] 24,640
ReLU-49 [-1, 64, 13, 13] 0
Conv2d-50 [-1, 256, 13, 13] 16,640
ReLU-51 [-1, 256, 13, 13] 0
Conv2d-52 [-1, 256, 13, 13] 147,712
ReLU-53 [-1, 256, 13, 13] 0
Fire-54 [-1, 512, 13, 13] 0
Conv2d-55 [-1, 64, 13, 13] 32,832
ReLU-56 [-1, 64, 13, 13] 0
Conv2d-57 [-1, 256, 13, 13] 16,640
ReLU-58 [-1, 256, 13, 13] 0
Conv2d-59 [-1, 256, 13, 13] 147,712
ReLU-60 [-1, 256, 13, 13] 0
Fire-61 [-1, 512, 13, 13] 0
Dropout-62 [-1, 512, 13, 13] 0
Conv2d-63 [-1, 5, 13, 13] 2,565
ReLU-64 [-1, 5, 13, 13] 0
AdaptiveAvgPool2d-65 [-1, 5, 1, 1] 0
================================================================
Total params: 725,061
Trainable params: 725,061
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 51.19
Params size (MB): 2.77
Estimated Total Size (MB): 54.53
----------------------------------------------------------------
Model's already Valid!
Avg batch_size: 100
Epoch: 31
Average train loss: 0.8680304259061813
Test Accuracy: 66.48%
epsilons: max 1.60, mean 1.60, std 0.00
31
Epoch: 61
Average train loss: 1.2338550209999084
Test Accuracy: 66.35%
epsilons: max 1.88, mean 1.88, std 0.00
61
Epoch: 91
Average train loss: 0.7700409732758999
Test Accuracy: 69.22%
epsilons: max 2.12, mean 2.12, std 0.00
91
Epoch: 121
Average train loss: 1.3446020433679222
Test Accuracy: 69.22%
epsilons: max 2.34, mean 2.34, std 0.00
121
Epoch: 151
Average train loss: 1.0365302547812463
Test Accuracy: 69.49%
epsilons: max 2.54, mean 2.54, std 0.00
151
Epoch: 181
Average train loss: 1.4126135781407356
Test Accuracy: 70.18%
epsilons: max 2.72, mean 2.72, std 0.00
181
Epoch: 211
Average train loss: 1.0700196724385023
Test Accuracy: 70.04%
epsilons: max 2.90, mean 2.90, std 0.00
211
Epoch: 241
Average train loss: 1.4402373015880585
Test Accuracy: 71.00%
epsilons: max 3.06, mean 3.06, std 0.00
241
Epoch: 271
Average train loss: 1.1214185684919358
Test Accuracy: 71.27%
epsilons: max 3.22, mean 3.22, std 0.00
271
Epoch: 301
Average train loss: 1.675106292963028
Test Accuracy: 71.82%
epsilons: max 3.37, mean 3.37, std 0.00
301
Epoch: 331
Average train loss: 1.6015321703249357
Test Accuracy: 71.96%
epsilons: max 3.52, mean 3.52, std 0.00
331
Epoch: 361
Average train loss: 1.449118322134018
Test Accuracy: 71.41%
epsilons: max 3.66, mean 3.66, std 0.00
361
Epoch: 391
Average train loss: 1.4826807975769043
Test Accuracy: 71.96%
epsilons: max 3.79, mean 3.79, std 0.00
391
Epoch: 421
Average train loss: 0.9254130482673645
Test Accuracy: 71.68%
epsilons: max 3.93, mean 3.93, std 0.00
421
Epoch: 451
Average train loss: 1.0424377381801606
Test Accuracy: 72.37%
epsilons: max 4.05, mean 4.05, std 0.00
451
Epoch: 481
Average train loss: 1.3688901513814926
Test Accuracy: 72.23%
epsilons: max 4.18, mean 4.18, std 0.00
481
Epoch: 511
Average train loss: 1.0848933432251215
Test Accuracy: 72.37%
epsilons: max 4.30, mean 4.30, std 0.00
511
Epoch: 541
Average train loss: 1.1458630129694938
Test Accuracy: 73.32%
epsilons: max 4.42, mean 4.42, std 0.00
541
Epoch: 571
Average train loss: 1.7325020730495453
Test Accuracy: 72.37%
epsilons: max 4.53, mean 4.53, std 0.00
571
Epoch: 601
Average train loss: 1.6278906852006911
Test Accuracy: 72.37%
epsilons: max 4.65, mean 4.65, std 0.00
601
Epoch: 631
Average train loss: 2.0078950464725493
Test Accuracy: 72.23%
epsilons: max 4.76, mean 4.76, std 0.00
631
Epoch: 661
Average train loss: 0.7366442933678627
Test Accuracy: 72.50%
epsilons: max 4.87, mean 4.87, std 0.00
661
Epoch: 691
Average train loss: 1.0183323707431555
Test Accuracy: 72.50%
epsilons: max 4.98, mean 4.98, std 0.00
691
Epoch: 721
Average train loss: 1.279178535938263
Test Accuracy: 74.28%
epsilons: max 5.08, mean 5.08, std 0.00
721
Epoch: 751
Average train loss: 1.302110480517149
Test Accuracy: 72.78%
epsilons: max 5.19, mean 5.19, std 0.00
751
Epoch: 781
Average train loss: 1.071476799249649
Test Accuracy: 74.83%
epsilons: max 5.29, mean 5.29, std 0.00
781
Epoch: 811
Average train loss: 1.080070748925209
Test Accuracy: 74.69%
epsilons: max 5.39, mean 5.39, std 0.00
811
Epoch: 841
Average train loss: 1.5498501032590866
Test Accuracy: 75.79%
epsilons: max 5.49, mean 5.49, std 0.00
841
Epoch: 871
Average train loss: 1.3900961086153985
Test Accuracy: 76.06%
epsilons: max 5.58, mean 5.58, std 0.00
871
Epoch: 901
Average train loss: 1.2506985783576965
Test Accuracy: 75.24%
epsilons: max 5.68, mean 5.68, std 0.00
901
Epoch: 931
Average train loss: 1.1775983149302192
Test Accuracy: 75.51%
epsilons: max 5.78, mean 5.78, std 0.00
931
Epoch: 961
Average train loss: 1.0888885051012038
Test Accuracy: 74.28%
epsilons: max 5.87, mean 5.87, std 0.00
961
Epoch: 991
Average train loss: 1.0380151539941835
Test Accuracy: 74.42%
epsilons: max 5.96, mean 5.96, std 0.00
991
Epoch: 1021
Average train loss: 1.2836167381610721
Test Accuracy: 73.46%
epsilons: max 6.05, mean 6.05, std 0.00
1021
Epoch: 1051
Average train loss: 0.9815390914678573
Test Accuracy: 74.56%
epsilons: max 6.15, mean 6.15, std 0.00
1051
Epoch: 1081
Average train loss: 1.3131526917219163
Test Accuracy: 75.10%
epsilons: max 6.24, mean 6.24, std 0.00
1081
Epoch: 1111
Average train loss: 1.5430449649691582
Test Accuracy: 74.56%
epsilons: max 6.32, mean 6.32, std 0.00
1111
Epoch: 1141
Average train loss: 1.9558857142925263
Test Accuracy: 74.28%
epsilons: max 6.41, mean 6.41, std 0.00
1141
Epoch: 1171
Average train loss: 1.6127178490161895
Test Accuracy: 74.42%
epsilons: max 6.50, mean 6.50, std 0.00
1171
Epoch: 1201
Average train loss: 1.1542355831246822
Test Accuracy: 74.42%
epsilons: max 6.58, mean 6.58, std 0.00
1201
Epoch: 1231
Average train loss: 1.3061110913753509
Test Accuracy: 75.51%
epsilons: max 6.67, mean 6.67, std 0.00
1231
Epoch: 1261
Average train loss: 1.6336829908192159
Test Accuracy: 72.91%
epsilons: max 6.75, mean 6.75, std 0.00
1261
Epoch: 1291
Average train loss: 1.4312793046236039
Test Accuracy: 74.56%
epsilons: max 6.84, mean 6.84, std 0.00
1291
Epoch: 1321
Average train loss: 1.666289968788624
Test Accuracy: 75.38%
epsilons: max 6.92, mean 6.92, std 0.00
1321
Epoch: 1351
Average train loss: 1.2944818764925003
Test Accuracy: 73.73%
epsilons: max 7.00, mean 7.00, std 0.00
1351
Epoch: 1381
Average train loss: 0.900818907545181
Test Accuracy: 74.83%
epsilons: max 7.09, mean 7.09, std 0.00
1381
Epoch: 1411
Average train loss: 0.8245966294780374
Test Accuracy: 74.97%
epsilons: max 7.17, mean 7.17, std 0.00
1411
Epoch: 1441
Average train loss: 2.0574686408042906
Test Accuracy: 76.61%
epsilons: max 7.25, mean 7.25, std 0.00
1441
Epoch: 1471
Average train loss: 1.0922214750258719
Test Accuracy: 75.24%
epsilons: max 7.33, mean 7.33, std 0.00
1471
Epoch: 1501
Average train loss: 0.7483208723308052
Test Accuracy: 74.97%
epsilons: max 7.41, mean 7.41, std 0.00
1501
Epoch: 1531
Average train loss: 1.0275813878513873
Test Accuracy: 75.10%
epsilons: max 7.48, mean 7.48, std 0.00
1531
Epoch: 1561
Average train loss: 1.6270009523257614
Test Accuracy: 73.60%
epsilons: max 7.56, mean 7.56, std 0.00
1561
Epoch: 1591
Average train loss: 1.2435616642236709
Test Accuracy: 77.15%
epsilons: max 7.64, mean 7.64, std 0.00
1591
Epoch: 1621
Average train loss: 0.9002696812152863
Test Accuracy: 75.92%
epsilons: max 7.71, mean 7.71, std 0.00
1621
Epoch: 1651
Average train loss: 1.2405086755752563
Test Accuracy: 76.33%
epsilons: max 7.79, mean 7.79, std 0.00
1651
Epoch: 1681
Average train loss: 0.8414239211706445
Test Accuracy: 76.47%
epsilons: max 7.87, mean 7.87, std 0.00
1681
Epoch: 1711
Average train loss: 1.1990172922611237
Test Accuracy: 75.51%
epsilons: max 7.94, mean 7.94, std 0.00
1711
Epoch: 1741
Average train loss: 1.5463334143161773
Test Accuracy: 76.33%
epsilons: max 8.02, mean 8.02, std 0.00
1741
Epoch: 1771
Average train loss: 1.0035676054656506
Test Accuracy: 74.15%
epsilons: max 8.09, mean 8.09, std 0.00
1771
Epoch: 1801
Average train loss: 0.8966000992222689
Test Accuracy: 74.83%
epsilons: max 8.17, mean 8.17, std 0.00
1801
Epoch: 1831
Average train loss: 1.7774979382753373
Test Accuracy: 75.38%
epsilons: max 8.24, mean 8.24, std 0.00
1831
Epoch: 1861
Average train loss: 1.3274790825322271
Test Accuracy: 76.06%
epsilons: max 8.31, mean 8.31, std 0.00
1861
Epoch: 1891
Average train loss: 1.4768219739198685
Test Accuracy: 75.10%
epsilons: max 8.38, mean 8.38, std 0.00
1891
Epoch: 1921
Average train loss: 1.2562436305941447
Test Accuracy: 75.79%
epsilons: max 8.45, mean 8.45, std 0.00
1921
Epoch: 1951
Average train loss: 1.079261130327359
Test Accuracy: 75.92%
epsilons: max 8.53, mean 8.53, std 0.00
1951
Epoch: 1981
Average train loss: 1.0335385203361511
Test Accuracy: 75.65%
epsilons: max 8.60, mean 8.60, std 0.00
1981
Epoch: 2011
Average train loss: 1.5446640759706498
Test Accuracy: 73.05%
epsilons: max 8.67, mean 8.67, std 0.00
2011
Epoch: 2041
Average train loss: 0.9841405741128255
Test Accuracy: 74.28%
epsilons: max 8.74, mean 8.74, std 0.00
2041
Epoch: 2071
Average train loss: 1.2356427431106567
Test Accuracy: 74.56%
epsilons: max 8.81, mean 8.81, std 0.00
2071
Epoch: 2101
Average train loss: 1.3388590414077044
Test Accuracy: 75.10%
epsilons: max 8.88, mean 8.88, std 0.00
2101
Epoch: 2131
Average train loss: 1.4482566632330418
Test Accuracy: 75.10%
epsilons: max 8.95, mean 8.95, std 0.00
2131
Epoch: 2161
Average train loss: 1.6120183706283568
Test Accuracy: 74.15%
epsilons: max 9.02, mean 9.02, std 0.00
2161
Epoch: 2191
Average train loss: 1.1626754401251673
Test Accuracy: 73.60%
epsilons: max 9.09, mean 9.09, std 0.00
2191
Epoch: 2221
Average train loss: 1.8099913954734803
Test Accuracy: 73.46%
epsilons: max 9.16, mean 9.16, std 0.00
2221
Epoch: 2251
Average train loss: 1.2859208025038242
Test Accuracy: 73.60%
epsilons: max 9.22, mean 9.22, std 0.00
2251
Epoch: 2281
Average train loss: 0.7701970211230218
Test Accuracy: 70.31%
epsilons: max 9.29, mean 9.29, std 0.00
2281
Epoch: 2311
Average train loss: 0.919431272149086
Test Accuracy: 74.56%
epsilons: max 9.36, mean 9.36, std 0.00
2311
Epoch: 2341
Average train loss: 1.4552948271142667
Test Accuracy: 74.15%
epsilons: max 9.42, mean 9.42, std 0.00
2341
Epoch: 2371
Average train loss: 0.9229130781255662
Test Accuracy: 73.46%
epsilons: max 9.49, mean 9.49, std 0.00
2371
Epoch: 2401
Average train loss: 1.0950526580214501
Test Accuracy: 74.69%
epsilons: max 9.56, mean 9.56, std 0.00
2401
Epoch: 2431
Average train loss: 0.8105157427489758
Test Accuracy: 75.38%
epsilons: max 9.63, mean 9.63, std 0.00
2431
Epoch: 2461
Average train loss: 1.7440265722572803
Test Accuracy: 74.56%
epsilons: max 9.69, mean 9.69, std 0.00
2461
Epoch: 2491
Average train loss: 1.6058862179517746
Test Accuracy: 74.56%
epsilons: max 9.76, mean 9.76, std 0.00
2491
Epoch: 2521
Average train loss: 1.1378908962011338
Test Accuracy: 74.69%
epsilons: max 9.82, mean 9.82, std 0.00
2521
Epoch: 2551
Average train loss: 2.3872986257076265
Test Accuracy: 73.60%
epsilons: max 9.89, mean 9.89, std 0.00
2551
Epoch: 2581
Average train loss: 0.7883930602110922
Test Accuracy: 75.10%
epsilons: max 9.95, mean 9.95, std 0.00
2581
Epoch: 2611
Average train loss: 1.276942177861929
Test Accuracy: 74.28%
epsilons: max 10.01, mean 10.01, std 0.00
2611
Epoch: 2641
Average train loss: 1.370180958509445
Test Accuracy: 73.87%
epsilons: max 10.08, mean 10.08, std 0.00
2641
Epoch: 2671
Average train loss: 1.2821424097754062
Test Accuracy: 73.87%
epsilons: max 10.14, mean 10.14, std 0.00
2671
Epoch: 2701
Average train loss: 1.440989726781845
Test Accuracy: 73.19%
epsilons: max 10.21, mean 10.21, std 0.00
2701
Epoch: 2731
Average train loss: 1.5192287415266037
Test Accuracy: 73.46%
epsilons: max 10.27, mean 10.27, std 0.00
2731
Epoch: 2761
Average train loss: 0.9208763502259899
Test Accuracy: 73.60%
epsilons: max 10.33, mean 10.33, std 0.00
2761
Epoch: 2791
Average train loss: 1.2961582779884337
Test Accuracy: 73.87%
epsilons: max 10.40, mean 10.40, std 0.00
2791
Epoch: 2821
Average train loss: 0.8416908806033462
Test Accuracy: 74.69%
epsilons: max 10.46, mean 10.46, std 0.00
2821
Epoch: 2851
Average train loss: 1.1525595715735109
Test Accuracy: 74.97%
epsilons: max 10.52, mean 10.52, std 0.00
2851
Epoch: 2881
Average train loss: 0.9540297769010067
Test Accuracy: 74.56%
epsilons: max 10.58, mean 10.58, std 0.00
2881
Epoch: 2911
Average train loss: 1.1379423439502716
Test Accuracy: 74.01%
epsilons: max 10.65, mean 10.65, std 0.00
2911
Epoch: 2941
Average train loss: 1.0055850505828858
Test Accuracy: 74.97%
epsilons: max 10.71, mean 10.71, std 0.00
2941
Epoch: 2971
Average train loss: 1.5548095494508742
Test Accuracy: 75.10%
epsilons: max 10.77, mean 10.77, std 0.00
2971
Epoch: 3001
Average train loss: 1.4640826493501664
Test Accuracy: 74.15%
epsilons: max 10.83, mean 10.83, std 0.00
3001
|
Localization/extended_kalman_filter_6_state/extended_kalman_filter_localization_6state.ipynb | ###Markdown
Extended Kalman Filter Localization
###Code
from IPython.display import Image
Image(filename="ekf.png",width=600)
###Output
_____no_output_____ |
Vocabulary_LinkedData.ipynb | ###Markdown
**Vocabulary Lazada Indonesian Reviews** Contributors: Gary Dimitri Hamidi, Rakha Abadi Susilo, Nur Aini Rakhmawati Contributor emails: [email protected], [email protected] Department of Information Systems, Institut Teknologi Sepuluh Nopember **Abstract** Lazada is one of the largest e-commerce platforms in the world. One of the countries in which Lazada provides its services is Indonesia. In Indonesia, Lazada mediates transactions across many categories, one of which is electronics. In this paper we build a vocabulary for selected electronics products on Lazada, based on a dataset collected in 2019. Two vocabularies are built: the first is a manual vocabulary that does not reuse any domain-specific vocabulary, and the second is an extended vocabulary that reuses GoodRelations (gr), Friend-of-a-Friend (foaf), and schema.org (s). Linked data connecting to the DBPedia database is also added. **Keywords**: E-commerce, lazada, xml, vocabulary, semantic web. --- **Dataset** The data, consisting of 4286 rows and 9 columns, was obtained from the e-commerce platform [Lazada](https://lazada.co.id) and is available on [Kaggle](https://www.kaggle.com/grikomsn/lazada-indonesian-reviews). Each row represents an electronics product that was on sale when the data was collected (2 October 2019). The nine columns can be described as follows:* **itemId** - Item / product ID on Lazada* **category** - Category in which Lazada placed the product. There are 5 categories: * beli-harddisk-eksternal * beli-laptop * beli-smart-tv * jual-flash-drives * shop-televisi-digital* **name** - Product name listed by the seller.* **brandName** - Brand of the product.* **url** - Product URL.* **price** - Product price.* **averagerating** - Average customer review rating of the product.* **totalReviews** - Number of customer reviews of the product.* **retrievedDate** - Date the data was collected.
###Code
from google_drive_downloader import GoogleDriveDownloader as gdd
import pandas as pd
gdd.download_file_from_google_drive(file_id='1h2_LvGAEeieqcZSRLs0YnP2MjYJYJGbd', dest_path='./data.xls', showsize=True)
data = pd.read_excel('data.xls')
data
!python -m pip install --no-input jupyter-rdfify
%reload_ext jupyter-rdfify
###Output
/usr/local/lib/python3.7/dist-packages/rdflib_jsonld/__init__.py:12: DeprecationWarning: The rdflib-jsonld package has been integrated into rdflib as of rdflib==6.0.1. Please remove rdflib-jsonld from your project's dependencies.
DeprecationWarning,
###Markdown
--- **Vocabulary** **Namespaces** * @prefix rdfs: `<http://www.w3.org/2000/01/rdf-schema#>` .* @prefix rdf: `<http://www.w3.org/1999/02/22-rdf-syntax-ns#>` .* @prefix xsd: `<http://www.w3.org/2001/XMLSchema#>` .* @prefix ex: `<http://example.com/>` .* @prefix gr: `<http://purl.org/goodrelations/v1#>` **Classes** There are 3 classes in this vocabulary:* **Kategori** (category). Example: "beli-smart-tv". The properties of the Kategori class are: * **memilikiMerk [class:merk]** ("has brand"): Kategori memilikiMerk Toshiba.* **Merk** (brand). Example: "Toshiba". The properties of the Merk class are: * **memilikiProduk [class:produk]** ("has product"): Toshiba memilikiProduk TOSHIBA_Smart_HD_LED_TV_32.* **Produk** (product). Example: "TOSHIBA_Smart_HD_LED_TV_32". The properties of the Produk class are: * **memilikiUri [xsd:anyUri]** ("has URI"): TOSHIBA_Smart_HD_LED_TV_32 memilikiUri "https://www.lazada.co.id/products/toshiba-smart-hd-led-tv-32.html" * **memilikiHarga [xsd:integer]** ("has price"): TOSHIBA_Smart_HD_LED_TV_32 memilikiHarga "2499000". * **memilikiRating [xsd:integer]** ("has rating"): TOSHIBA_Smart_HD_LED_TV_32 memilikiRating "4". * **memilikiTotalReview [xsd:integer]** ("has total reviews"): TOSHIBA_Smart_HD_LED_TV_32 memilikiTotalReview "8". * **memilikiTanggalMasuk [xsd:date]** ("has date added"): TOSHIBA_Smart_HD_LED_TV_32 memilikiTanggalMasuk "2019-10-02". **Instances** **Instance 1** * beli-smart-tv is a Kategori* beli-smart-tv memilikiMerk Toshiba* Toshiba memilikiProduk TOSHIBA_Smart_HD_LED_TV_32* TOSHIBA_Smart_HD_LED_TV_32 is a Produk.* TOSHIBA_Smart_HD_LED_TV_32 memilikiUri https://www.lazada.co.id/products/toshiba-smart-hd-led-tv-32.html.* TOSHIBA_Smart_HD_LED_TV_32 memilikiHarga 2499000.* TOSHIBA_Smart_HD_LED_TV_32 memilikiRating 4.* TOSHIBA_Smart_HD_LED_TV_32 memilikiTotalReview 8.* TOSHIBA_Smart_HD_LED_TV_32 memilikiTanggalMasuk 2019-10-02.
###Code
%%rdf turtle
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.com/> .
######**CLASS**
ex:category a rdfs:Class .
ex:merk a rdfs:Class .
ex:produk a rdfs:Class .
####**PROPERTIES**
ex:memilikiMerk a rdf:Property .
ex:memilikiMerk rdfs:domain ex:category .
ex:memilikiMerk rdfs:range ex:merk .
ex:memilikiProduk a rdf:Property .
ex:memilikiProduk rdfs:domain ex:merk .
ex:memilikiProduk rdfs:range ex:produk .
ex:memilikiUri a rdf:Property .
ex:memilikiUri rdfs:domain ex:produk .
ex:memilikiUri rdfs:range xsd:anyUri .
ex:memilikiHarga a rdf:Property .
ex:memilikiHarga rdfs:domain ex:produk .
ex:memilikiHarga rdfs:range xsd:integer .
ex:memilikiRating a rdf:Property .
ex:memilikiRating rdfs:domain ex:produk .
ex:memilikiRating rdfs:range xsd:integer .
ex:memilikiTotalReview a rdf:Property .
ex:memilikiTotalReview rdfs:domain ex:produk .
ex:memilikiTotalReview rdfs:range xsd:integer .
ex:memilikiTanggalMasuk a rdf:Property .
ex:memilikiTanggalMasuk rdfs:domain ex:produk .
ex:memilikiTanggalMasuk rdfs:range xsd:date .
#####**INSTANCES**
#####
ex:TOSHIBA_Smart_HD_LED_TV_32 a ex:produk.
ex:TOSHIBA_Smart_HD_LED_TV_32 ex:memilikiUri "https://www.lazada.co.id/products/toshiba-smart-hd-led-tv-32.html"^^xsd:anyUri.
ex:TOSHIBA_Smart_HD_LED_TV_32 ex:memilikiHarga "2499000"^^xsd:integer.
ex:TOSHIBA_Smart_HD_LED_TV_32 ex:memilikiRating "4"^^xsd:integer.
ex:TOSHIBA_Smart_HD_LED_TV_32 ex:memilikiTotalReview "8"^^xsd:integer.
ex:TOSHIBA_Smart_HD_LED_TV_32 ex:memilikiTanggalMasuk "2019-10-02"^^xsd:date.
#####
ex:beli-smart-tv a ex:category.
ex:beli-smart-tv ex:memilikiMerk ex:Toshiba .
ex:Toshiba ex:memilikiProduk ex:TOSHIBA_Smart_HD_LED_TV_32.
ex:Toshiba a ex:merk.
###Output
_____no_output_____
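###Markdown
As an illustrative addition (not part of the original notebook), the same triples can also be loaded and queried with `rdflib`; the graph below is abbreviated to a single triple:
###Code
from rdflib import Graph
g = Graph()
g.parse(data="""
@prefix ex: <http://example.com/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
ex:TOSHIBA_Smart_HD_LED_TV_32 ex:memilikiHarga "2499000"^^xsd:integer .
""", format="turtle")
# List every product with its price
query = """
SELECT ?product ?price WHERE { ?product ex:memilikiHarga ?price }
"""
for row in g.query(query, initNs={"ex": "http://example.com/"}):
    print(row.product, row.price)
###Output
_____no_output_____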
###Markdown
**Instance 2** * beli-smart-tv is a Kategori* beli-smart-tv memilikiMerk Toshiba* Toshiba memilikiProduk TOSHIBA_Smart_HD_LED_TV_40* TOSHIBA_Smart_HD_LED_TV_40 is a Produk.* TOSHIBA_Smart_HD_LED_TV_40 memilikiUri https://www.lazada.co.id/products/toshiba-smart-hd-led-tv-40.html.* TOSHIBA_Smart_HD_LED_TV_40 memilikiHarga 3788000.* TOSHIBA_Smart_HD_LED_TV_40 memilikiRating 3.* TOSHIBA_Smart_HD_LED_TV_40 memilikiTotalReview 3.* TOSHIBA_Smart_HD_LED_TV_40 memilikiTanggalMasuk 2019-10-02.
###Code
%%rdf turtle
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.com/> .
ex:beli-smart-tv a ex:category.
ex:beli-smart-tv ex:memilikiMerk ex:Toshiba .
ex:Toshiba ex:memilikiProduk ex:TOSHIBA_Smart_HD_LED_TV_40.
ex:Toshiba a ex:merk.
ex:TOSHIBA_Smart_HD_LED_TV_40 a ex:produk.
ex:TOSHIBA_Smart_HD_LED_TV_40 ex:memilikiUri "https://www.lazada.co.id/products/toshiba-smart-hd-led-tv-40.html"^^xsd:anyUri.
ex:TOSHIBA_Smart_HD_LED_TV_40 ex:memilikiHarga "3788000"^^xsd:integer.
ex:TOSHIBA_Smart_HD_LED_TV_40 ex:memilikiRating "3"^^xsd:integer.
ex:TOSHIBA_Smart_HD_LED_TV_40 ex:memilikiTotalReview "3"^^xsd:integer.
ex:TOSHIBA_Smart_HD_LED_TV_40 ex:memilikiTanggalMasuk "2019-10-02"^^xsd:date.
###Output
_____no_output_____
###Markdown
**Instance 3** * beli-smart-tv is a Kategori.* beli-smart-tv memilikiMerk Sharp.* Sharp memilikiProduk Sharp_HD_LED_TV_24.* Sharp_HD_LED_TV_24 is a Produk.* Sharp_HD_LED_TV_24 memilikiUri https://www.lazada.co.id/products/sharp-hd-led-tv-24-lc-24le175i-hitam-i100004505-s100007387.html.* Sharp_HD_LED_TV_24 memilikiHarga 1275000.* Sharp_HD_LED_TV_24 memilikiRating 3.* Sharp_HD_LED_TV_24 memilikiTotalReview 11.* Sharp_HD_LED_TV_24 memilikiTanggalMasuk 2019-10-02.
###Code
%%rdf turtle
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.com/> .
ex:beli-smart-tv a ex:category.
ex:beli-smart-tv ex:memilikiMerk ex:Sharp .
ex:Sharp ex:memilikiProduk ex:Sharp_HD_LED_TV_24.
ex:Sharp a ex:merk.
ex:Sharp_HD_LED_TV_24 a ex:produk.
ex:Sharp_HD_LED_TV_24 ex:memilikiUri "https://www.lazada.co.id/products/sharp-hd-led-tv-24-lc-24le175i-hitam-i100004505-s100007387.html"^^xsd:anyUri.
ex:Sharp_HD_LED_TV_24 ex:memilikiHarga "1275000"^^xsd:integer.
ex:Sharp_HD_LED_TV_24 ex:memilikiRating "3"^^xsd:integer.
ex:Sharp_HD_LED_TV_24 ex:memilikiTotalReview "11"^^xsd:integer.
ex:Sharp_HD_LED_TV_24 ex:memilikiTanggalMasuk "2019-10-02"^^xsd:date.
###Output
_____no_output_____
###Markdown
--- **Vocabulary Extending Popular Vocabularies** **Namespaces** * @prefix rdfs: `<http://www.w3.org/2000/01/rdf-schema#>` .* @prefix rdf: `<http://www.w3.org/1999/02/22-rdf-syntax-ns#>` .* @prefix xsd: `<http://www.w3.org/2001/XMLSchema#>` .* @prefix ex: `<http://example.com/>` .* @prefix gr: `<http://purl.org/goodrelations/v1#>` .* @prefix foaf: `<http://xmlns.com/foaf/0.1/>` .* @prefix s: `<http://schema.org/>` . **Classes** * **gr:SomeItems**. Example: beli-smart-tv. Its properties are: * **gr:category [xsd:string]**: beli-smart-tv category "beli-smart-tv". * **gr:hasBrand [gr:Brand]**: beli-smart-tv hasBrand Toshiba.* **gr:Brand**. Example: Toshiba. Its property is: * **gr:name [xsd:string]**: Toshiba name "Toshiba".* **gr:UnitPriceSpecification**. Example: Harga. Its property is: * **gr:hasCurrencyValue [xsd:currency]**.* **gr:ProductOrServiceModel**. Example: TOSHIBA_Smart_HD_LED_TV_32. Its properties are: * **gr:name [xsd:string]**: TOSHIBA_Smart_HD_LED_TV_32 name "TOSHIBA Smart HD LED TV 32". * **gr:hasBrand [gr:Brand]**: TOSHIBA_Smart_HD_LED_TV_32 hasBrand Toshiba. * **gr:hasPriceSpecification [gr:UnitPriceSpecification]**: TOSHIBA_Smart_HD_LED_TV_32 hasPriceSpecification Harga. * **foaf:page [xsd:anyURI]**: link to the product page on Lazada. * **s:aggregateRating [s:AggregateRating; s:ratingValue; s:reviewCount]**: TOSHIBA_Smart_HD_LED_TV_32 aggregateRating; ratingValue 4; reviewCount 8. * **s:dateCreated [xsd:date]**: TOSHIBA_Smart_HD_LED_TV_32 dateCreated 2019-10-02. **Instances** Instance 1
###Code
%%rdf turtle
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.com/> .
@prefix gr: <http://purl.org/goodrelations/v1#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix s: <http://schema.org/> .
ex:beli-smart-tv a gr:SomeItems;
gr:category "beli-smart-tv"^^xsd:string;
gr:hasBrand ex:Toshiba .
ex:Toshiba a gr:Brand;
gr:name "Toshiba"^^xsd:string.
ex:Harga a gr:UnitPriceSpecification;
gr:hasCurrencyValue "2499000"^^xsd:currency.
ex:TOSHIBA_Smart_HD_LED_TV_32 a gr:ProductOrServiceModel;
gr:name "TOSHIBA Smart HD LED TV 32"^^xsd:string;
gr:hasBrand ex:Toshiba;
gr:hasPriceSpecification ex:Harga;
foaf:page <https://www.lazada.co.id/products/toshiba-smart-hd-led-tv-32.html>;
s:aggregateRating [ a s:AggregateRating;
s:ratingValue "4"^^xsd:integer;
s:reviewCount "8"^^xsd:integer;
];
s:dateCreated "2019-10-02"^^xsd:date.
###Output
_____no_output_____
###Markdown
Instance 2
###Code
%%rdf turtle
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.com/> .
@prefix gr: <http://purl.org/goodrelations/v1#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix s: <http://schema.org/> .
ex:beli-smart-tv a gr:SomeItems;
gr:category "beli-smart-tv"^^xsd:string;
gr:hasBrand ex:Toshiba .
ex:Toshiba a gr:Brand;
gr:name "Toshiba"^^xsd:string.
ex:Harga a gr:UnitPriceSpecification;
gr:hasCurrencyValue "3788000"^^xsd:currency.
ex:TOSHIBA_Smart_HD_LED_TV_40 a gr:ProductOrServiceModel;
gr:name "TOSHIBA Smart HD LED TV 40"^^xsd:string;
gr:hasBrand ex:Toshiba;
gr:hasPriceSpecification ex:Harga;
foaf:page <https://www.lazada.co.id/products/toshiba-smart-hd-led-tv-40.html>;
s:aggregateRating [ a s:AggregateRating;
s:ratingValue "3"^^xsd:integer;
s:reviewCount "3"^^xsd:integer;
];
s:dateCreated "2019-10-02"^^xsd:date.
###Output
_____no_output_____
###Markdown
Instance 3
###Code
%%rdf turtle
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.com/> .
@prefix gr: <http://purl.org/goodrelations/v1#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix s: <http://schema.org/> .
ex:beli-smart-tv a gr:SomeItems;
gr:category "beli-smart-tv"^^xsd:string;
gr:hasBrand ex:Sharp .
ex:Sharp a gr:Brand;
gr:name "Sharp"^^xsd:string.
ex:Harga a gr:UnitPriceSpecification;
gr:hasCurrencyValue "1275000"^^xsd:currency.
ex:Sharp_HD_LED_TV_24 a gr:ProductOrServiceModel;
gr:name "Sharp HD LED TV 24"^^xsd:string;
gr:hasBrand ex:Sharp;
gr:hasPriceSpecification ex:Harga;
foaf:page <https://www.lazada.co.id/products/sharp-hd-led-tv-24-lc-24le175i-hitam-i100004505-s100007387.html>;
s:aggregateRating [ a s:AggregateRating;
s:ratingValue "3"^^xsd:integer;
s:reviewCount "11"^^xsd:integer;
];
s:dateCreated "2019-10-02"^^xsd:date.
###Output
_____no_output_____
###Markdown
--- **Vocabulary Extending Popular Vocabularies with Linked Data** **Namespaces** * @prefix rdfs: `<http://www.w3.org/2000/01/rdf-schema#>` .* @prefix rdf: `<http://www.w3.org/1999/02/22-rdf-syntax-ns#>` .* @prefix xsd: `<http://www.w3.org/2001/XMLSchema#>` .* @prefix ex: `<http://example.com/>` .* @prefix gr: `<http://purl.org/goodrelations/v1#>` .* @prefix foaf: `<http://xmlns.com/foaf/0.1/>` .* @prefix s: `<http://schema.org/>` .* @prefix db: `<https://dbpedia.org/resource/>` . **Classes** * **gr:SomeItems**. Example: beli-smart-tv. Its properties are: * **gr:category [xsd:string]**: beli-smart-tv category "beli-smart-tv". * **gr:hasBrand [gr:Brand]**: beli-smart-tv hasBrand Toshiba.* **gr:Brand**. Example: Toshiba. Its properties are: * **gr:name [xsd:string]**: Toshiba name "Toshiba". * **s:Corporation [db:Page]**: Sharp Corporation "Sharp_Corporation" (a link to the corresponding DBPedia resource).* **gr:UnitPriceSpecification**. Example: Harga. Its property is: * **gr:hasCurrencyValue [xsd:currency]**.* **gr:ProductOrServiceModel**. Example: TOSHIBA_Smart_HD_LED_TV_32. Its properties are: * **gr:name [xsd:string]**: TOSHIBA_Smart_HD_LED_TV_32 name "TOSHIBA Smart HD LED TV 32". * **gr:hasBrand [gr:Brand]**: TOSHIBA_Smart_HD_LED_TV_32 hasBrand Toshiba. * **gr:hasPriceSpecification [gr:UnitPriceSpecification]**: TOSHIBA_Smart_HD_LED_TV_32 hasPriceSpecification Harga. * **foaf:page [xsd:anyURI]**: link to the product page on Lazada. * **s:aggregateRating [s:AggregateRating; s:ratingValue; s:reviewCount]**: TOSHIBA_Smart_HD_LED_TV_32 aggregateRating; ratingValue 4; reviewCount 8. * **s:dateCreated [xsd:date]**: TOSHIBA_Smart_HD_LED_TV_32 dateCreated 2019-10-02. **Instances** Instance 1
###Code
%%rdf turtle
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.com/> .
@prefix gr: <http://purl.org/goodrelations/v1#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix s: <http://schema.org/> .
@prefix db: <https://dbpedia.org/resource/> .
ex:beli-smart-tv a gr:SomeItems;
gr:category "beli-smart-tv"^^xsd:string;
gr:hasBrand ex:Toshiba .
ex:Toshiba a gr:Brand;
gr:name "Toshiba"^^xsd:string;
s:Corporation db:Toshiba.
ex:Harga a gr:UnitPriceSpecification;
gr:hasCurrencyValue "2499000"^^xsd:currency.
ex:TOSHIBA_Smart_HD_LED_TV_32 a gr:ProductOrServiceModel;
gr:name "TOSHIBA Smart HD LED TV 32"^^xsd:string;
gr:hasBrand ex:Toshiba;
gr:hasPriceSpecification ex:Harga;
foaf:page <https://www.lazada.co.id/products/toshiba-smart-hd-led-tv-32.html>;
s:aggregateRating [ a s:AggregateRating;
s:ratingValue "4"^^xsd:integer;
s:reviewCount "8"^^xsd:integer;
];
s:dateCreated "2019-10-02"^^xsd:date.
###Output
_____no_output_____
###Markdown
Instance 2
###Code
%%rdf turtle
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.com/> .
@prefix gr: <http://purl.org/goodrelations/v1#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix s: <http://schema.org/> .
@prefix db: <https://dbpedia.org/resource/> .
ex:beli-smart-tv a gr:SomeItems;
gr:category "beli-smart-tv"^^xsd:string;
gr:hasBrand ex:Toshiba .
ex:Toshiba a gr:Brand;
gr:name "Toshiba"^^xsd:string;
s:Corporation db:Toshiba.
ex:Harga a gr:UnitPriceSpecification;
gr:hasCurrencyValue "3788000"^^xsd:currency.
ex:TOSHIBA_Smart_HD_LED_TV_40 a gr:ProductOrServiceModel;
gr:name "TOSHIBA Smart HD LED TV 40"^^xsd:string;
gr:hasBrand ex:Toshiba;
gr:hasPriceSpecification ex:Harga;
foaf:page <https://www.lazada.co.id/products/toshiba-smart-hd-led-tv-40.html>;
s:aggregateRating [ a s:AggregateRating;
s:ratingValue "3"^^xsd:integer;
s:reviewCount "3"^^xsd:integer;
];
s:dateCreated "2019-10-02"^^xsd:date.
###Output
_____no_output_____
###Markdown
Instance 3
###Code
%%rdf turtle
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.com/> .
@prefix gr: <http://purl.org/goodrelations/v1#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix s: <http://schema.org/> .
@prefix db: <https://dbpedia.org/resource/> .
ex:beli-smart-tv a gr:SomeItems;
gr:category "beli-smart-tv"^^xsd:string;
gr:hasBrand ex:Sharp .
ex:Sharp a gr:Brand;
gr:name "Sharp"^^xsd:string;
s:Corporation db:Sharp_Corporation.
ex:Harga a gr:UnitPriceSpecification;
gr:hasCurrencyValue "1275000"^^xsd:currency.
ex:Sharp_HD_LED_TV_24 a gr:ProductOrServiceModel;
gr:name "Sharp HD LED TV 24"^^xsd:string;
gr:hasBrand ex:Sharp;
gr:hasPriceSpecification ex:Harga;
foaf:page <https://www.lazada.co.id/products/sharp-hd-led-tv-24-lc-24le175i-hitam-i100004505-s100007387.html>;
s:aggregateRating [ a s:AggregateRating;
s:ratingValue "3"^^xsd:integer;
s:reviewCount "11"^^xsd:integer;
];
s:dateCreated "2019-10-02"^^xsd:date.
###Output
_____no_output_____ |
climpred_student_project_predictability_limits.ipynb | ###Markdown
`s2s_verification_climpred` student project Predictability Limits This notebook is part of the [tutorials](https://www.cgd.ucar.edu/events/2021/asp-colloquia/tutorials.html) in the [ASP summer school](https://www.cgd.ucar.edu/events/2021/asp-colloquia/). In the [S2S verification tutorial](https://docs.google.com/document/d/1nQOyjjAjdqN2sl3IeJYCytCo4l_49GW6fMgkKjsnsCc/edit), we use `climpred` (https://climpred.readthedocs.io/en/stable/) to verify subseasonal-to-seasonal (S2S) forecasts against observations. --- Intro: When verifying against reanalysis (a gridded version of observations), we introduce an error, since the model evolution will differ from that of real nature due to truncation (and other) errors. To get an estimate of the predictability limit for a particular process, we can verify the ensemble against one of the ensemble members rather than against reanalysis. This gives a predictability limit for that process under the assumption that there is no difference between the model and nature; this is also called “potential predictability”. Obviously, this estimate can be wrong if the model does not represent the physical process correctly. Level of difficulty:- country data: easy- geospatial data: medium --- Other resources:- `xarray`: the workhorse for geospatial data in Python - documentation: xarray.pydata.org/ - tutorial: https://xarray-contrib.github.io/xarray-tutorial/- `xskillscore`: built on top of `xarray`; provides the `metric`s used by `climpred` - documentation: https://xskillscore.readthedocs.io/en/stable/ - quick-start: https://mybinder.org/v2/gh/xarray-contrib/xskillscore/master?urlpath=lab- `climpred`: - documentation: https://climpred.readthedocs.io/en/stable/ - data model: https://climpred.readthedocs.io/en/stable/setting-up-data.html - classes: https://climpred.readthedocs.io/en/stable/prediction-ensemble-object.html - list of initialized public datasets to work with: https://climpred.readthedocs.io/en/stable/initialized-datasets.html - terminology: https://climpred.readthedocs.io/en/stable/terminology.html - alignment: https://climpred.readthedocs.io/en/stable/alignment.html --- Usage questions? Consider...- raising an [issue](https://github.com/pangeo-data/climpred/issues), which can be transferred to [discussions](https://github.com/pangeo-data/climpred/discussions)- reaching out on [slack](asp2021-s2s.slack.com)
###Code
import numpy as np
import xarray as xr
import climpred
import matplotlib.pyplot as plt
xr.set_options(keep_attrs=True)
climpred.set_options(warn_for_failed_PredictionEnsemble_xr_call=False)
###Output
_____no_output_____
###Markdown
Get data Predictability limit To estimate the predictability limit, replace the verifying dataset with one of the ensemble members. Does the predictability horizon increase or decrease?
###Code
# first try with HindcastEnsemble
# your code
# now try with PerfectModelEnsemble
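# A minimal sketch (an added illustration; assumes climpred's tutorial data
# can be downloaded). In a PerfectModelEnsemble one ensemble member plays the
# role of the observations, which yields "potential predictability" rather
# than real-world skill.
ds = climpred.tutorial.load_dataset("MPI-PM-DP-1D")  # example perfect-model dataset
pm = climpred.PerfectModelEnsemble(ds)
# 'm2c' verifies the remaining members against a control member of the ensemble
pp_skill = pm.verify(metric="rmse", comparison="m2c", dim=["init", "member"])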
###Output
_____no_output_____
###Markdown
Reference forecasts Another aspect of predictability is to see if the S2S ensemble forecast outperforms climatology and/or persistence. Is this forecast still good if the climatology is produced from the years excluding the verified period?
###Code
# calc skill initialized and reference skills: https://climpred.readthedocs.io/en/latest/reference_forecast.html
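# A minimal sketch (an added illustration; assumes the tutorial datasets
# download successfully; names are climpred's example data, not S2S output):
init = climpred.tutorial.load_dataset("CESM-DP-SST")  # initialized hindcast
obs = climpred.tutorial.load_dataset("ERSST")         # observations
hindcast = climpred.HindcastEnsemble(init).add_observations(obs)
# reference= adds climatology and persistence skill next to the initialized skill
skill = hindcast.verify(metric="rmse", comparison="e2o", dim="init",
                        alignment="same_verifs",
                        reference=["climatology", "persistence"])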
###Output
_____no_output_____ |
Amostragem Estratificada.ipynb | ###Markdown
Stratified Sampling (Amostragem Estratificada)
###Code
import pandas as pd
from sklearn.model_selection import train_test_split
ls -hl data
# Python variables work with bash commands \o/
file_path = 'data/iris.csv'
# first line
!head $file_path -n 1
!echo
# second line
!head $file_path -n 2 | tail -n 1
# Write the sample to a separate file; redirecting `head` output onto the input
# file itself would truncate it before it is read
file_path_small = 'data/iris_small.csv'
!head $file_path > $file_path_small
# Load the full dataset and count how many records exist per class
iris = pd.read_csv(file_path)
iris['class'].value_counts()
# iris.iloc[:, 0:4]: select only the predictor attributes, i.e. the petal and sepal measurements
# iris.iloc[:, 4]: select only the class, i.e. the species of the plant (setosa, virginica or versicolor)
# test_size: take 50% of the dataset, which is copied into the variables X and y; the function returns 4 values
# stratify: draw the sample stratified by class
X, _, y, _ = train_test_split(iris.iloc[:, 0:4], iris.iloc[:, 4], test_size=0.5, stratify=iris.iloc[:, 4])
y.value_counts()
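# Quick check (added for illustration): with stratify, the class proportions in
# the sample should match the proportions in the full dataset
print(iris['class'].value_counts(normalize=True))
print(y.value_counts(normalize=True))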
# Load the dataset and count records per class
infert = pd.read_csv('C:\\Users\\taynna.silva\\Documents\\DS\\16.Prática em Python\\dados\\infert.csv')
infert
infert['education'].value_counts()
# Create a sample with only 40% of the records (test_size is set to 0.6 because the returned sample is the complement)
X1, _, y1, _ = train_test_split(infert.iloc[:, 2:9], infert.iloc[:, 1], test_size=0.6, stratify=infert.iloc[:, 1])
y1.value_counts()
###Output
_____no_output_____ |
notebooks/official/antarctica-compare.ipynb | ###Markdown
Compare Field Across Mesh Regions Here is some velocity data from a glacier modelling simulation that is compared across nodes in the simulation. We have simplified the mesh to have the simulation node value already on the mesh. This was originally posted to [pyvista/pyvista-support#83](https://github.com/pyvista/pyvista-support/issues/83). The modeling results are courtesy of [Benoit Urruty](https://github.com/BenoitURRUTY) and are from the [Elmer/Ice](http://elmerice.elmerfem.org) simulation software.
###Code
# sphinx_gallery_thumbnail_number = 2
import pyvista as pv
from pyvista import examples
import numpy as np
# Load the sample data
mesh = examples.download_antarctica_velocity()
mesh["magnitude"] = np.linalg.norm(mesh["ssavelocity"], axis=1)
mesh
###Output
_____no_output_____
###Markdown
Here is a helper to extract regions of the mesh based on the simulation node.
###Code
def extract_node(node):
idx = mesh["node_value"] == node
return mesh.extract_points(idx)
p = pv.Plotter()
p.add_mesh(mesh, scalars="node_value")
for node in np.unique(mesh["node_value"]):
loc = extract_node(node).center
p.add_point_labels(loc, [f"Node {node}"])
p.show(cpos="xy")
vel_dargs = dict(scalars="magnitude", clim=[1e-3, 1e4], cmap='Blues', log_scale=True)
mesh.plot(cpos="xy", **vel_dargs)
a = extract_node(12)
b = extract_node(20)
pl = pv.Plotter()
pl.add_mesh(a, **vel_dargs)
pl.add_mesh(b, **vel_dargs)
pl.show(cpos='xy')
###Output
_____no_output_____
###Markdown
Plot the vectors without the mesh.
###Code
pl = pv.Plotter()
pl.add_mesh(a.glyph(orient="ssavelocity", factor=20), **vel_dargs)
pl.add_mesh(b.glyph(orient="ssavelocity", factor=20), **vel_dargs)
pl.camera_position = [(-1114684.6969340036, 293863.65389149904, 752186.603224546),
(-1114684.6969340036, 293863.65389149904, 0.0),
(0.0, 1.0, 0.0)]
pl.show()
###Output
_____no_output_____
###Markdown
Compare directions. Normalize them so we can get a reasonable direction comparison.
###Code
flow_a = a.point_data['ssavelocity'].copy()
flow_a /= np.linalg.norm(flow_a, axis=1).reshape(-1, 1)
flow_b = b.point_data['ssavelocity'].copy()
flow_b /= np.linalg.norm(flow_b, axis=1).reshape(-1, 1)
# plot normalized vectors
pl = pv.Plotter()
pl.add_arrows(a.points, flow_a, mag=10000, color='b', label='flow_a')
pl.add_arrows(b.points, flow_b, mag=10000, color='r', label='flow_b')
pl.add_legend()
pl.camera_position = [(-1044239.3240694795, 354805.0268606294, 484178.24825854995),
(-1044239.3240694795, 354805.0268606294, 0.0),
(0.0, 1.0, 0.0)]
pl.show()
###Output
_____no_output_____
###Markdown
Plot flow\_a colored by its agreement with the mean flow direction of flow\_b.
###Code
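# (Added note) Each row of flow_a is a unit vector, so dotting it with the mean
# unit flow of block b gives the cosine of the angle between the two directions:
# +1 means aligned, 0 perpendicular, -1 opposite.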
agree = flow_a.dot(flow_b.mean(0))
pl = pv.Plotter()
pl.add_mesh(a, scalars=agree, cmap='bwr',
scalar_bar_args={'title': 'Flow agreement with block b'})
pl.add_mesh(b, color='w')
pl.show(cpos='xy')
agree = flow_b.dot(flow_a.mean(0))
pl = pv.Plotter()
pl.add_mesh(a, color='w')
pl.add_mesh(b, scalars=agree, cmap='bwr',
scalar_bar_args={'title': 'Flow agreement with block a'})
pl.show(cpos='xy')
###Output
_____no_output_____ |
tensorflow/rock_paper_sissors_multi_class_classifier.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors. **IMPORTANT NOTE:** This notebook is designed to run as a Colab. Click the button on top that says, `Open in Colab`, to run this notebook as a Colab. Running the notebook on your local machine might result in some of the code blocks throwing errors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# rps training set
!gdown --id 1DYVMuV2I_fA6A3er-mgTavrzKuxwpvKV
# rps testing set
!gdown --id 1RaodrRK1K03J_dGiLu8raeUynwmIbUaM
import os
import zipfile
local_zip = './rps.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('tmp/rps-train')
zip_ref.close()
local_zip = './rps-test-set.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('tmp/rps-test')
zip_ref.close()
base_dir = 'tmp/rps-train/rps'
rock_dir = os.path.join(base_dir, 'rock')
paper_dir = os.path.join(base_dir, 'paper')
scissors_dir = os.path.join(base_dir, 'scissors')
print('total training rock images:', len(os.listdir(rock_dir)))
print('total training paper images:', len(os.listdir(paper_dir)))
print('total training scissors images:', len(os.listdir(scissors_dir)))
rock_files = os.listdir(rock_dir)
print(rock_files[:10])
paper_files = os.listdir(paper_dir)
print(paper_files[:10])
scissors_files = os.listdir(scissors_dir)
print(scissors_files[:10])
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
pic_index = 2
next_rock = [os.path.join(rock_dir, fname)
for fname in rock_files[pic_index-2:pic_index]]
next_paper = [os.path.join(paper_dir, fname)
for fname in paper_files[pic_index-2:pic_index]]
next_scissors = [os.path.join(scissors_dir, fname)
for fname in scissors_files[pic_index-2:pic_index]]
for i, img_path in enumerate(next_rock+next_paper+next_scissors):
#print(img_path)
img = mpimg.imread(img_path)
plt.imshow(img)
plt.axis('Off')
plt.show()
import tensorflow as tf
import keras_preprocessing
from keras_preprocessing import image
from keras_preprocessing.image import ImageDataGenerator
TRAINING_DIR = "tmp/rps-train/rps"
training_datagen = ImageDataGenerator(
rescale = 1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
VALIDATION_DIR = "tmp/rps-test/rps-test-set"
validation_datagen = ImageDataGenerator(rescale = 1./255)
train_generator = training_datagen.flow_from_directory(
TRAINING_DIR,
target_size=(150,150),
class_mode='categorical',
batch_size=126
)
validation_generator = validation_datagen.flow_from_directory(
VALIDATION_DIR,
target_size=(150,150),
class_mode='categorical',
batch_size=126
)
model = tf.keras.models.Sequential([
# Note the input shape is the desired size of the image 150x150 with 3 bytes color
# This is the first convolution
tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
# The second convolution
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The third convolution
tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The fourth convolution
tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# Flatten the results to feed into a DNN
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
# 512 neuron hidden layer
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(3, activation='softmax')
])
model.summary()
model.compile(loss = 'categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
history = model.fit(train_generator, epochs=25, steps_per_epoch=20, validation_data = validation_generator, verbose = 1, validation_steps=3)
model.save("rps.h5")
import matplotlib.pyplot as plt
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()
###Output
_____no_output_____
###Markdown
Here's a codeblock just for fun! You should be able to upload an image here and have it classified without crashing. This codeblock will only work in Google Colab, however.**Important Note:** Due to some compatibility issues, the following code block will result in an error after you select the image(s) to upload if you are running this notebook as a `Colab` on the `Safari` browser. For `all other browsers`, continue with the next code block and ignore the next one after it. Those running the `Colab` on `Safari` should comment out the code block below, uncomment the next code block and run it.
###Code
import numpy as np
from google.colab import files
from keras.preprocessing import image
uploaded = files.upload()
for fn in uploaded.keys():
# predicting images
path = fn
img = image.load_img(path, target_size=(150, 150))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
print(fn)
print(classes)
###Output
_____no_output_____
###Markdown
Those running this `Colab` in the `Safari` browser can upload the image(s) manually. Follow the instructions, uncomment the code block below and run it.Instructions on how to upload image(s) manually in a Colab:1. Select the `folder` icon on the left `menu bar`.2. Click on the `folder with an arrow pointing upwards` named `..`3. Click on the `folder` named `tmp`.4. Inside of the `tmp` folder, `create a new folder` called `images`. You'll see the `New folder` option by clicking the `3 vertical dots` menu button next to the `tmp` folder.5. Inside of the new `images` folder, upload an image (or several) of your choice, preferably of a rock, paper, or scissors hand gesture. Drag and drop the image(s) on top of the `images` folder.6. Uncomment and run the code block below.
###Code
import numpy as np
from keras.preprocessing import image
import os
images = os.listdir("/tmp/images")
print(images)
for i in images:
print()
# predicting images
path = '/tmp/images/' + i
img = image.load_img(path, target_size=(150, 150))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
print(path)
print(classes)
###Output
_____no_output_____ |
tensorflow_guideWithIris.ipynb | ###Markdown
###Code
from sklearn import datasets
iris = datasets.load_iris()
import pandas as pd
df_iris = pd.DataFrame(iris.data)
df_iris.info()
import sqlite3
connect = sqlite3.connect('./db.sqlite3')
df_iris.to_sql('iris_resource',connect,if_exists='replace',index=False)
df_load = pd.read_sql_query('select * from iris_resource ' , connect )
df_load.head(4)
x_data = df_load.to_numpy()
x_data.shape
import numpy as np
y_data = iris.target
y_data , np.unique(y_data)
from sklearn.model_selection import train_test_split
x_train , x_val , y_train , y_val = train_test_split(x_data , y_data)
###Output
_____no_output_____
###Markdown
Training step
###Code
import tensorflow as tf
model = tf.keras.Sequential()
model.add(tf.keras.Input(shape=(4,))) # input layer
model.add(tf.keras.layers.Dense(64,activation='relu')) # hidden layer / default relu
model.add(tf.keras.layers.Dense(24,activation='relu')) # hidden layer / default relu
model.add(tf.keras.layers.Dense(3,activation='softmax')) # output layer / sigmoid for two classes, softmax for three or more classes
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy' , metrics=['acc'])
# default -> optimizer='adam', loss='mse' / the optimizer computes the gradients, and the loss is checked to decide whether the next values go up or down
# model.fit(x_data , y_data , epochs=50 , validation_split=0.3 ) # alternative: train with a validation split; epochs = number of iterations
model.fit(x_train , y_train , epochs=50 , validation_data=(x_val,y_val)) # train; epochs = number of iterations
###Output
_____no_output_____
###Markdown
Evaluation
###Code
model.evaluate(x_data,y_data)
from sklearn.metrics import classification_report , confusion_matrix
y_pred = model.predict(x_data)
y_pred.shape , y_pred[4]
y_data.shape , y_data[4]
import numpy as np
y_pred_argmax = np.argmax(y_pred, axis=1)
y_pred_argmax.shape , y_data[4]
y_data
print(classification_report(y_data , y_pred_argmax))
confusion_matrix(y_data , y_pred_argmax)
import seaborn as sns
sns.heatmap(confusion_matrix(y_data , y_pred_argmax) , annot=True)
###Output
_____no_output_____
###Markdown
Serving step
###Code
x_data[25] ,y_data[25]
pred = model.predict([[5. , 3. , 1.6, 0.2]])
pred
import numpy as np
np.argmax(pred)
from sklearn.metrics import roc_curve , auc
y_pred = model.predict(x_data)
y_pred.shape
from sklearn.metrics import classification_report , confusion_matrix
print(classification_report(y_data, np.argmax(y_pred, axis=1)))
###Output
_____no_output_____ |
homework/hw7/Homework7_KernelKMeans_EM.ipynb | ###Markdown
Homework 7: Kernel K-Means and EMBy Rachel Manzelli and Brian Kulis with the help of N. Frumkin, K. Chauhan, and A. Tsiligkaridis*Please submit to Blackboard by 11:59pm on* **Friday, April 5.** Problem 1: Kernel K-MeansIn this exercise, we will consider how one may go about performing non-linear machine learning by adapting machine learning algorithms that we have discussed in class. We will discuss one particular approach that has been widely used throughout machine learning. Recall the discussion from lecture: we take our feature vectors $\boldsymbol{x}_1, ..., \boldsymbol{x}_n$ and apply a non-linear function $\phi$ to each point to yield $\phi(\boldsymbol{x}_1), ..., \phi(\boldsymbol{x}_n)$. Then, if we apply a linear machine learning algorithm (e.g., k-means or SVM) on the mapped data, the linear boundary in the mapped space will correspond to a non-linear boundary in the input space.We looked at one particular mapping in class. Consider a two-dimensional feature vector $\boldsymbol{x} = (x_1 x_2)^T$, and define the function $\phi$ as \begin{equation*}\phi(\boldsymbol{x}) = \left(\begin{array}{c}1 \\\sqrt{2} x_1 \\\sqrt{2} x_2 \\\sqrt{2} x_1 x_2\\x_1^2\\x_2^2\end{array} \right).\end{equation*}As discussed in class, the inner product $\phi(\boldsymbol{x}_i)^T \phi(\boldsymbol{x}_j)$ between two mapped vectors is equal to $(\boldsymbol{x}_i^T \boldsymbol{x}_j + 1)^2$; that is, one can compute the inner product between data points in the mapped space without explicitly forming the 6-dimensional mapped vectors for the data. Because applying such a mapping may be computationally expensive, this trick can allow us to run machine learning algorithms in the mapped space without explicitly forming the mappings. For instance, in a k-NN classifier, one must compute the (squared) Euclidean distance between a test point $\boldsymbol{x}_t$ and a training point $\boldsymbol{x}_i$. Expanding this distance out yields\begin{equation*}\|\boldsymbol{x}_t - \boldsymbol{x}_i\|^2_2 = (\boldsymbol{x}_t - \boldsymbol{x}_i)^T (\boldsymbol{x}_t - \boldsymbol{x}_i) = \boldsymbol{x}_t^T \boldsymbol{x}_t - 2 \boldsymbol{x}_t^T \boldsymbol{x}_i + \boldsymbol{x}_i^T \boldsymbol{x}_i.\end{equation*}Then, computing this distance after applying the mapping $\phi$ would be easy:\begin{equation*}\|\phi(\boldsymbol{x}_t) - \phi(\boldsymbol{x}_i)\|^2_2 = (\boldsymbol{x}_t^T \boldsymbol{x}_t + 1)^2 - 2 (\boldsymbol{x}_t^T \boldsymbol{x}_i + 1)^2 + (\boldsymbol{x}_i^T \boldsymbol{x}_i + 1)^2.\end{equation*}**a.** In the example above, the original feature vector was 2-dimensional. Show how to generalize the $\phi$ mapping to $d$-dimensional vector inputs such that the inner product between mapped vectors is $(\boldsymbol{x}_i^T \boldsymbol{x}_j + 1)^2$. Explicitly describe the embedding $\phi$; what dimensions does it have, and what values will it represent?**b.** Consider extending the k-means algorithm to discover non-linear boundaries using the above mapping. In the k-means algorithm, the assignment step involves computing $\|\boldsymbol{x}_i - \boldsymbol{\mu}_j\|_2^2$ for each point $\boldsymbol{x}_i$ and each cluster mean $\boldsymbol{\mu}_j$. Suppose we map the data via $\phi$. How would one compute the distance $\|\phi(\boldsymbol{x}_i) - \boldsymbol{\mu}_j\|^2_2$, where now $\boldsymbol{\mu}_j$ is the mean of the mapped data points? 
Be careful: one cannot simply compute\begin{equation*} (\boldsymbol{x}_i^T \boldsymbol{x}_i + 1)^2 - 2 (\boldsymbol{x}_i^T \boldsymbol{\mu}_j + 1)^2 + (\boldsymbol{\mu}_j^T \boldsymbol{\mu}_j + 1)^2.\end{equation*}**c.** Write out pseudocode for the extension of k-means where this mapping is applied to the data. In your algorithm, be careful not to ever explicitly compute $\phi(\boldsymbol{x}_i)$ for any data vector; *only work with inner products in the algorithm.***d.** With this new mapping, what properties will the decision surface have (i.e., what could it look like)? Why is this? a. $\phi(\vec{x}_d)$ for $\vec{x}_d = (x_1, x_2, x_3, \ldots, x_d)$:$\phi(\boldsymbol{\vec{x}_d}) = \left(\begin{array}{c}1 \\\sqrt{2} x_1 \\\sqrt{2} x_2 \\\sqrt{2} x_3\\\vdots\\\sqrt{2} x_d \\\sqrt{2} x_1x_2 \\\sqrt{2} x_1x_3 \\\vdots\\\sqrt{2} x_{d-1}x_d\\x_1^2\\x_2^2\\\vdots\\x_d^2\end{array} \right).$The first entry of $\phi(\vec{x}_d)$ is one; the next entries are the $\binom{d}{1}=d$ linear terms and the $\binom{d}{2}$ pairwise cross terms, and the final entries are the squares $x_1^2, x_2^2, \ldots, x_d^2$.The dimension of $\phi(\vec{x}_d)$ is therefore $\left(1 + \binom{d}{1} + \binom{d}{2} + d\right) \times 1$. b.$\|\phi(\boldsymbol{x}_i) - \boldsymbol{\mu}_j\|^2_2$$=\phi(\boldsymbol{x}_i)^T\phi(\boldsymbol{x}_i) -\frac{2\Sigma_{x_k\in\pi_j}\phi(x_k)^T\phi(x_i)}{\|\pi_j\|} + \frac{\Sigma_{x_k\in\pi_j, x_l\in\pi_j}\phi(x_k)^T\phi(x_l)}{\|\pi_j\|^2}$$=(x_i^Tx_i+1)^2 - \frac{2\Sigma_{x_k\in\pi_j}(x_k^Tx_i+1)^2}{\|\pi_j\|} + \frac{\Sigma_{x_k\in\pi_j, x_l\in\pi_j}(x_k^Tx_l+1)^2}{\|\pi_j\|^2}$where $\pi_j$ denotes cluster $j$ and $\|\pi_j\|$ the number of data points in cluster $j$. c.Algorithm:1. Randomly initialize $k$ clusters.2. Compute the distance between each data point and each cluster mean in the new space using$\|\phi(\boldsymbol{x}_i) - \boldsymbol{\mu}_j\|^2_2=(x_i^Tx_i+1)^2 - \frac{2\Sigma_{x_k\in\pi_j}(x_k^Tx_i+1)^2}{\|\pi_j\|} + \frac{\Sigma_{x_k\in\pi_j, x_l\in\pi_j}(x_k^Tx_l+1)^2}{\|\pi_j\|^2}$3. Assign each data point to the closest cluster.4. Check for convergence; if converged, stop, otherwise go to step 2. d. The decision surface can be polynomial (here quadratic), because we mapped the data points into a higher-dimensional space of polynomial features. Problem 2: Expectation-Maximization (E-M)As you saw in lecture, the expectation-maximization algorithm is an iterative method to find maximum likelihood (ML) estimates of parameters in statistical models. The E-M algorithm alternates between performing an expectation (E) step, which creates a function for the expectation of the log-likelihood evaluated using the current estimate for the parameters, and a maximization (M) step, which computes parameters maximizing the expected log-likelihood found on the E step. This alternation repeats until convergence.We would like you to perform E-M first on a sample Gaussian mixture model (GMM). Doing this will allow you to prove that your algorithm works, since you already know the parameters of the model. Follow the instructions step by step below.
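First, though, a quick look back at Problem 1: the cell below is a small, hedged sketch of the kernelized assignment step described in part (c) — our own illustration, not the graded solution. The helper names (`poly_kernel`, `kernel_kmeans_assign`) and the toy data are assumptions made for this example; the point is that it works only with the inner products $(\boldsymbol{x}_i^T\boldsymbol{x}_j + 1)^2$ and never forms $\phi(\boldsymbol{x}_i)$ explicitly.
###Code
import numpy as np
def poly_kernel(X, Y):
    # Kernel trick: (x^T y + 1)^2 for all pairs, without ever forming phi
    return (X @ Y.T + 1) ** 2
def kernel_kmeans_assign(X, labels, k):
    # One assignment pass of kernel k-means, using only inner products
    K = poly_kernel(X, X)
    n = X.shape[0]
    dists = np.full((n, k), np.inf)
    for j in range(k):
        members = np.where(labels == j)[0]
        m = len(members)
        if m == 0:
            continue  # empty cluster: leave its distances at infinity
        # ||phi(x_i) - mu_j||^2 = K_ii - (2/m) sum_k K_ik + (1/m^2) sum_{k,l} K_kl
        dists[:, j] = (np.diag(K)
                       - 2.0 / m * K[:, members].sum(axis=1)
                       + K[np.ix_(members, members)].sum() / m ** 2)
    return np.argmin(dists, axis=1)
# Tiny usage example: random 2-D points, a random initial labeling, a few passes
rng = np.random.RandomState(0)
X_demo = rng.randn(20, 2)
labels = rng.randint(0, 3, size=20)
for _ in range(10):
    labels = kernel_kmeans_assign(X_demo, labels, 3)
print(labels)
###Output
_____no_output_____
###Markdown
With that aside done, the setup for Problem 2 follows.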
###Code
from matplotlib.patches import Ellipse
from scipy.special import logsumexp
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
###Output
_____no_output_____
###Markdown
**a. Data creation.** Create 3 2D Gaussian clusters of data, with the following means and covariances:$\boldsymbol{\mu}_1 = [2,2]^T, \boldsymbol{\mu}_2 = [-2,-2]^T, \boldsymbol{\mu}_3 = [0,-2]^T$,$\Sigma_1 = [[0.02,0];[0,0.02]]$, $\Sigma_2 = [[0.2,0];[0,0.2]]$, $\Sigma_3 = [[0.05,0];[0,0.05]]$ Create 50 points in each cluster and plot the data. The combination of these will serve as your Gaussian mixture model.
###Code
# Part a - data creation. This code is from the previous homework. You do not have to edit it.
num_pts = 50
x1, y1 = np.random.multivariate_normal([2,2], [[0.02,0],[0,0.02]], num_pts).T
x2, y2 = np.random.multivariate_normal([-2,-2], [[0.2,0],[0,0.2]], num_pts).T
x3, y3 = np.random.multivariate_normal([0,-2], [[0.05,0],[0,0.05]], num_pts).T
# Concatenate clusters into one dataset
x_total = np.zeros((3*num_pts,1))
x_total[0:num_pts,0] = x1
x_total[num_pts:2*num_pts,0] = x2
x_total[2*num_pts:3*num_pts,0] = x3
y_total = np.zeros((3*num_pts,1))
y_total[0:num_pts,0] = y1
y_total[num_pts:2*num_pts,0] = y2
y_total[2*num_pts:3*num_pts,0] = y3
data = np.concatenate((x_total,y_total),axis=1)
# Plotting
plt.plot(x_total,y_total,'x')
ax = plt.gca()
ax.set_xlim([-5,5])
ax.set_ylim([-5,5])
plt.grid()
plt.xlabel('X')
plt.ylabel('Y')
plt.title('Multivariate Gaussian - 3 Variables')
plt.show()
###Output
_____no_output_____
###Markdown
**b. Fill in the code to complete the EM algorithm given below.** Remember, the EM algorithm is given by a process similar to k-means/DP-means in nature, since it is iterative. However, the actual calculations done are very different. For a Gaussian mixture model, they are described by:*E-Step (Compute probabilities with given Gaussian parameters.* **This has already been completed for you. You can find the equations for this in the Discussion 8 folder on GitHub.**)*M-Step (Update parameters. The subscript c denotes the parameter for a given cluster c, so this is calculated for each cluster.):*\begin{equation*}n\_per\_cluster = \sum_{i=1}^{n\_points} \gamma(z_{ic})\end{equation*}\begin{equation*}\pi_c = \frac{n\_per\_cluster}{n\_points}\end{equation*}\begin{equation*}\mu_c = \frac{1}{n\_per\_cluster} * \sum_{i=1}^{n\_points} \gamma(z_{ic}) * x_i \end{equation*}\begin{equation*}\Sigma_c = \frac{1}{n\_per\_cluster} * \sum_{i=1}^{n\_points} \gamma(z_{ic}) * (x_i - \mu_c)(x_i - \mu_c)^T \end{equation*}*Repeat until convergence. To check for convergence, we check if the log-likelihood estimate is close enough to the previous estimate to stop the algorithm. To compute the log-likelihood estimate:*\begin{equation*}LL(\theta) = \sum_{i=1}^{n\_points} log \sum_{j=1}^{k} \pi_j * \frac{1}{2\pi|\Sigma_j|^\frac{1}{2}} exp(-0.5*(x_i - \mu_j)^T\Sigma_j^{-1}(x_i - \mu_j))\end{equation*}*Note that the "absolute value" signs around $\Sigma_j$ are actually indicative of the determinant of the covariance matrix. **In completing the algorithm below, you will complete the M-Step and the log-likelihood estimate. To compute the log-likelihood, we strongly recommend using `scipy.special.logsumexp`, as it is more numerically stable than manually computing.**
###Code
def EStep(data, n_points, k, pi, mu, cov):
## Performs the expectation (E) step ##
## You do not need to edit this function (actually, please do not edit it..)
# The end result is an n_points x k matrix, where each element is the probability that
# the ith point will be in the jth cluster.
expectations = np.zeros((n_points, k)) # n_points x k np.array, where each row adds to 1
denominators = []
for i in np.arange(n_points):
denominator = 0
for j in np.arange(k):
# Calculate denominator, which is a sum over k
denominator_scale = pi[j] * 1/(2 * math.pi * np.sqrt(np.linalg.det(cov[j])))
denom = denominator_scale * np.exp(-0.5 * (data[i].reshape(2,1) - mu[j]).T @ np.linalg.inv(cov[j]) @ (data[i].reshape(2,1) - mu[j]))
denominator = np.add(denominator, denom)
denominator = np.asscalar(denominator)
denominators.append(denominator)
for i in np.arange(n_points):
numerator = 0
for j in np.arange(k):
# Calculate the numerator
numerator_scale = pi[j] * 1/(2 * math.pi * np.sqrt(np.linalg.det(cov[j])))
numer = np.exp(-0.5 * (data[i].reshape(2,1) - mu[j]).T @ np.linalg.inv(cov[j]) @ (data[i].reshape(2,1) - mu[j]))
numerator = numerator_scale * numer
# Set the probability of the ith point for the jth cluster
expectations[i][j] = numerator/denominators[i]
return expectations
def MStep(data, n_points, k, expectations):
## Performs the maximization (M) step ##
# We clear the parameters completely, since we recompute them each time
mu = [np.zeros((2,1)) for _ in np.arange(k)] # 3 2x1 np.arrays in a list
cov = [np.zeros((2,2)) for _ in np.arange(k)] # 3 2x2 np.arrays in a list
n_per_cluster = [0, 0, 0]
pi = [0, 0, 0]
## YOUR CODE HERE ##
# Update number of points in each cluster
# print(data[0].shape)
# print(mu[0].shape)
for i in range(0,3):
for j in range(0,n_points):
n_per_cluster[i] += expectations[j][i]
# Update mixing weights
for i in range(0,3):
pi[i] = (n_per_cluster[i]/n_points)
# Update means
for i in range(0,3):
for j in range(0, n_points):
mu[i] += expectations[j][i]*data[j].reshape(2,1)
mu[i] /= n_per_cluster[i]
# Update covariances
for i in range(0,3):
for j in range(0, n_points):
cov[i] += expectations[j][i] * (data[j].reshape(2,1) - mu[i]) @ (data[j].reshape(2,1) - mu[i]).T
cov[i] /= n_per_cluster[i]
## END YOUR CODE HERE ##
return n_per_cluster, pi, mu, cov
def loglikelihood(data, n_points, k, pi, mu, cov):
## Calculates ML estimate ##
likelihood = 0
scale = [] # When using logsumexp the scale is required to be in an array
exponents = [] # When using logsumexp the exponent is required to be in an array
## YOUR CODE HERE ##
for i in range(n_points):
for j in range(k):
det = np.linalg.det(cov[j])
det = math.sqrt(det)
scale.append(pi[j]/(2*math.pi*det))
exp = -0.5 * (data[i].reshape(2,1) - mu[j]).T @ np.linalg.inv(cov[j]) @ (data[i].reshape(2,1) - mu[j])
exponents.append(exp)
likelihood += logsumexp(exponents, b = scale)
scale = []
exponents = []
# Compute the log-likelihood estimate
## END YOUR CODE HERE ##
return likelihood
def ExpectationMaximization_GMM(data, n_per_cluster, n_points, k, pi, mu, cov):
## Performs expectation-maximization iteratively until convergence is reached ##
# You do not need to edit this function.
converged = False
ML_estimate = 0
while not converged:
# E-Step: find probabilities
expectations = EStep(data, n_points, k, pi, mu, cov)
# M-Step: recompute parameters
n_per_cluster, pi, mu, cov = MStep(data, n_points, k, expectations)
# Plot the current parameters against the data
# Ignore this, it just makes it look nice using some cool properties of eigenvectors!
## PLOT CODE ##
lambda_1, v1 = np.linalg.eig(cov[0])
lambda_1 = np.sqrt(lambda_1)
lambda_2, v2 = np.linalg.eig(cov[1])
lambda_2 = np.sqrt(lambda_2)
lambda_3, v3 = np.linalg.eig(cov[2])
lambda_3 = np.sqrt(lambda_3)
# Plot data
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
plt.plot(x_total,y_total,'x')
plt.grid()
# Plot ellipses
ell1 = Ellipse(xy=(mu[0][0], mu[0][1]),
width=lambda_1[0]*3, height=lambda_1[1]*3,
angle=np.rad2deg(np.arccos(v1[0, 0])), linewidth=5, edgecolor='red', facecolor='none')
ax.add_artist(ell1)
ell2 = Ellipse(xy=(mu[1][0], mu[1][1]),
width=lambda_2[0]*3, height=lambda_2[1]*3,
angle=np.rad2deg(np.arccos(v2[0, 0])), linewidth=5, edgecolor='green', facecolor='none')
ax.add_artist(ell2)
ell3 = Ellipse(xy=(mu[2][0], mu[2][1]),
width=lambda_3[0]*3, height=lambda_3[1]*3,
angle=np.rad2deg(np.arccos(v3[0, 0])), linewidth=5, edgecolor='yellow', facecolor='none')
ax.add_artist(ell3)
axe = plt.gca()
axe.set_xlim([-5,5])
axe.set_ylim([-5,5])
plt.xlabel('X')
plt.ylabel('Y')
plt.title('Multivariate Gaussian - 3 Variables')
plt.show()
## END PLOT CODE ##
# Check for convergence via log likelihood
old_ML_estimate = np.copy(ML_estimate)
ML_estimate = loglikelihood(data, n_points, k, pi, mu, cov)
if abs(old_ML_estimate - ML_estimate) < 0.01:
converged = 1
return mu, cov
###Output
_____no_output_____
###Markdown
**c. Perform EM on the GMM you created.** Put it all together! Run the completed EM function on your dataset. (This part is already done for you, just run it and see the output.)
###Code
# Initialize total number of points (n), number of clusters (k),
# mixing weights (pi), means (mu) and covariance matrices (cov)
n_points = 150 # 150 points total
k = 3 # we know there are 3 clusters
mu = [(3 - (-3)) * np.random.rand(2,1) + (-3) for _ in np.arange(k)]
cov = [10 * np.identity(2) for _ in np.arange(k)]
n_per_cluster = [n_points/k for _ in np.arange(k)] # even split for now
pi = [n/n_points for n in n_per_cluster] # mixing weights, which should sum to 1
mu_estimate, cov_estimate = ExpectationMaximization_GMM(data, n_per_cluster, n_points, k, pi, mu, cov)
print("The estimates of the parameters of the Gaussians are: ")
print("Mu:", mu_estimate)
print("Covariance:", cov_estimate)
###Output
_____no_output_____ |
notebooks/01b-instructor-joint-conditional-probability.ipynb | ###Markdown
Joint Probability, Conditional Probability and Bayes' Rule
###Code
#Import packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
sns.set()
%qtconsole
###Output
_____no_output_____
###Markdown
Learning Objectives of Part 1-b - To understand and be able to simulate joint probabilities and conditional probabilities;- To understand Bayes' Theorem and its utility. Joint Probability & Conditional Probability Joint Probability We have already encountered joint probabilities in the previous notebook, perhaps without knowing it: $P(A,B)$ is the probability of two events $A$ and $B$ _both_ occurring.* For example, getting two heads in a row.If $A$ and $B$ are independent, then $P(A,B)=P(A)P(B)$ but be warned: this is not always (or often) the case.One way to think of this is considering "AND" as multiplication: the probability of A **and** B is the probability of A **multiplied** by the probability of B. Hands-On: Joint Probability and Coin Flipping Verify that $P(A,B)=P(A)P(B)$ in the two fair coin-flip case (A=heads, B=heads) by - first simulating two coins being flipped together and calculating the proportion of occurrences with two heads;- then simulating one coin flip and calculating the proportion of heads and then doing that again and multiplying the two proportions.Your two calculations should give "pretty close" results and not the same results due to the (in)accuracy of simulation.
###Code
# Solution: Calculate P(A,B)
x_0 = np.random.binomial(2, 0.5, 10000)
p_ab = sum(x_0==2)/len(x_0)
# Now, plot the histogram of the results
plt.hist(x_0);
print(p_ab)
# Solution: Calculate P(A)P(B)
x_1 = np.random.binomial(1, 0.5, 10000)
x_2 = np.random.binomial(1, 0.5, 10000)
p_a = sum(x_1 == 1)/len(x_1)
p_b = sum(x_2 == 1)/len(x_2)
p_a*p_b
###Output
_____no_output_____
###Markdown
**Note:** In order to use such simulation and _hacker statistics_ approaches to "prove" results such as the above, we're gliding over several coupled and deep technicalities. This is in the interests of the pedagogical nature of this introduction. For the sake of completeness, we'll mention that we're essentially- Using the proportion in our simulations as a proxy for the probability (which, although Frequentist, is useful to allow you to start getting your hands dirty with probability via simulation).Having stated this, for ease of instruction, we'll continue to do so when thinking about joint & conditional probabilities of both simulated and real data. Hands-On: Joint probability for birds What is the probability that two randomly selected birds have beak lengths over 10?
###Code
# Import data & store lengths in a pandas series
df_12 = pd.read_csv('../data/finch_beaks_2012.csv')
lengths = df_12['blength']
# Calculate P(A)P(B) of two birds having beak lengths > 10
p_a = (sum(lengths > 10))/len(lengths)
p_b = (sum(lengths > 10))/len(lengths)
p_a*p_b
###Output
_____no_output_____
###Markdown
* Calculate the joint probability using the resampling method, that is, by drawing random samples (with replacement) from the data. First calculate $P(A)P(B)$:
###Code
# Calculate P(A)P(B) using resampling methods
n_samples = 100000
p_a = sum(np.random.choice(lengths, n_samples, replace=True) > 10)/n_samples
p_b = sum(np.random.choice(lengths, n_samples, replace=True) > 10)/n_samples
p_a*p_b
###Output
_____no_output_____
###Markdown
Now calculate $P(A,B)$:
###Code
# Calculate P(A,B) using resampling methods
n_samples = 100000
samples = np.random.choice(lengths, (n_samples,2), replace=True)
_ = samples > (10, 10)
p_ab = sum(np.prod(_, axis=1))/n_samples
p_ab
###Output
_____no_output_____
###Markdown
**Task:** Interpret the results of your simulations. Conditional Probability Now that we have a grasp on joint probabilities, let's consider conditional probabilities, that is, the probability of some $A$, knowing that some other $B$ is true. We use the notation $P(A|B)$ to denote this. For example, you can ask the question "What is the probability of a finch beak having length $<10$, knowing that the finch is of species 'fortis'?" Example: conditional probability for birds 1. What is the probability of a finch beak having length > 10? 2. What if we know the finch is of species 'fortis'? 3. What if we know the finch is of species 'scandens'?
###Code
sum(df_12.blength > 10)/len(df_12)
df_fortis = df_12.loc[df_12['species'] == 'fortis']
sum(df_fortis.blength > 10)/len(df_fortis)
df_scandens = df_12.loc[df_12['species'] == 'scandens']
sum(df_scandens.blength > 10)/len(df_scandens)
###Output
_____no_output_____
###Markdown
**Note:** These proportions are definitely different. We can't say much more currently but we'll soon see how to use hypothesis testing to see what else we can say about the differences between the species of finches. Joint and conditional probabilitiesConditional and joint probabilities are related by the following:$$ P(A,B) = P(A|B)P(B)$$ **Homework exercise for the avid learner:** verify the above relationship using simulation/resampling techniques in one of the cases above.  Hands on example: drug testing **Question:** Suppose that a test for using a particular drug is 99% sensitive and 99% specific. That is, the test will produce 99% true positive results for drug users and 99% true negative results for non-drug users. Suppose that 0.5% (5 in 1,000) of people are users of the drug. What is the probability that a randomly selected individual with a positive test is a drug user?**If we can answer this, it will be really cool as it shows how we can move from knowing $P(+|user)$ to $P(user|+)$, an MVP for being able to move from $P(data|model)$ to $P(model|data)$.** In the spirit of this workshop, it's now time to harness your computational power and the intuition of simulation to solve this drug testing example. * Before doing so, what do you think the answer to the question _"What is the probability that a randomly selected individual with a positive test is a drug user?"_ is? Write down your guess.
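Before simulating the drug test, here is a hedged sketch of the homework exercise above — our own illustration, assuming two fair coins: we estimate $P(A|B)$ as the proportion of coin-1 heads among the trials where coin 2 came up heads, and check that $P(A|B)P(B)$ is (nearly) equal to the directly simulated $P(A,B)$.
###Code
# Hedged sketch (our own addition): verify P(A,B) = P(A|B)P(B) for two fair
# coin flips, with A = coin 1 heads and B = coin 2 heads
n = 100000
coin_1 = np.random.binomial(1, 0.5, n)
coin_2 = np.random.binomial(1, 0.5, n)
p_b = sum(coin_2 == 1)/n
# P(A|B): proportion of coin-1 heads among the trials where coin 2 was heads
p_a_given_b = sum(coin_1[coin_2 == 1] == 1)/sum(coin_2 == 1)
# P(A,B): proportion of trials where both coins came up heads
p_ab = sum((coin_1 == 1) & (coin_2 == 1))/n
print(p_a_given_b * p_b, p_ab)  # the two estimates should be very close
###Output
_____no_output_____
###Markdown
Now, the drug-testing simulation itself: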
###Code
# Take 10,000 subjects
n = 100000
# Sample for number of users, non-users
users = np.random.binomial(n, 0.005, 1)
non_users = n - users
# How many of these users tested +ve ?
u_pos = np.random.binomial(users, 0.99)
# How many of these non-users tested +ve ?
non_pos = np.random.binomial(non_users, 0.01)
# how many of those +ve tests were for users?
u_pos/(u_pos+non_pos)
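# Hedged analytic check (our own addition, not part of the original notebook):
# Bayes' rule gives P(user|+) = P(+|user)P(user) / P(+), where
# P(+) = P(+|user)P(user) + P(+|non-user)P(non-user)
p_user_given_pos = (0.99 * 0.005) / (0.99 * 0.005 + 0.01 * 0.995)
print(p_user_given_pos)  # ~0.33 -- should be close to the simulated value above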
###Output
_____no_output_____ |
solutions/Solutions.ipynb | ###Markdown
Solutions --- Exercise 2.3.1 Add the following data to the `mySerie1` pandas Series object as a new row:```GeneK 25```
###Code
# Make a pandas Series from a dictionary and append it to the `mySerie1` pandas Series object.
mySerie2 = pd.Series({"GeneK" : 25})
mySerie1 = mySerie1.append(mySerie2)
# Alternatively and a bit shorter
mySerie1['GeneK'] = 25
###Output
_____no_output_____
###Markdown
--- Exercise 2.4.1- Select the number of counts in *GeneD* for the second and third experiment. - Add a new column to the dataframe with the average of the three experiments.
###Code
# Subquestion one
#df[['counts_exp2','counts_exp3']][3:4]
#df.iloc[[3],[1,2]]
#df.loc[['GeneD'],['counts_exp2','counts_exp3']]
# Subquestion two
#df['avg'] = df.sum(axis = 1) / len(df.columns)
#df['avg'] = (df['counts_exp1'] + df['counts_exp2'] + df['counts_exp3']) / 3
###Output
_____no_output_____
###Markdown
--- Exercise 2.4.2- Search in the pandas documentation for the median method and add a column that describes the median count values per gene.- Search in the pandas documentation for a method that will sum all of the values of one experiment and add it as an extra row to the table. - Remove the row with the sum of the counts that we added in the previous step.
###Code
# Subquestion one
df.median(axis = 1, skipna = True)
df['median'] = df.median(axis = 1, skipna = True)
# Subquestion two
df.sum()
df[['counts_exp1','counts_exp2','counts_exp3']].sum()
df.loc['sum'] = df[['counts_exp1','counts_exp2','counts_exp3','avg']].sum()
df
# Alternatively, more according to what we have seen:
row_data = df[:].sum()
row_series = pd.Series(data=row_data, name='sum')
df.append(row_series, ignore_index=False)
# Subquestion three
df.drop('sum', axis = 0, inplace=True)
###Output
_____no_output_____
###Markdown
--- Exercise 2.5.1 - ASearch for the parameters of `.read_csv` that you need in order to read in the `metagenic.csv` file where:- chromosomes are the index of the rows, and - only the first 10 rows are imported.
###Code
metagenic_sub = pd.read_csv('data/metagenic.csv', index_col = 'chr', nrows = 10)
metagenic_sub
metagenic_sub = pd.read_csv('data/metagenic.csv', index_col = 0, nrows=10, header=0)
metagenic_sub
###Output
_____no_output_____
###Markdown
Exercise 2.5.1 - BImport the data from the `metagenic.csv` file and add a new column with the total counts for each chromosome (e.g. chromosome 21 has 88 counts), and sort the table by descending total counts per chromosome.
###Code
# Import data
metagenic = pd.read_csv('data/metagenic.csv')
# Add a column with total counts
metagenic["total"] = metagenic.sum(axis = 1)
# Order the table per total counts
metagenic = metagenic.sort_values("total", ascending = False)
metagenic
###Output
_____no_output_____
###Markdown
--- Exercise 2.5.2Can you find a method that will retrieve the indices of all the virginica flowers?
###Code
iris.loc[iris['species'] == 'virginica'].index
###Output
_____no_output_____
###Markdown
--- Exercise 2.5.3From the file `metagenic.csv`:1. Sort the table based on the counts in exons in descending way 2. Make a subselection of chromosomes with at least 15 counts in introns.
###Code
metagenic = pd.read_csv('data/metagenic.csv', index_col = 'chr')
#metagenic
# Subquestion one
metagenic.sort_values(by=['exon'], ascending = False)
# Subquestion two
metagenic[metagenic['intron'] >= 15]
###Output
_____no_output_____
###Markdown
--- Exercise 2.5.4For this exercise we will use [this dataset](https://datahub.io/core/pharmaceutical-drug-spending) which contains the spendings of a bunch of countries in the pharmaceutical industry as from 1971. The dataset is available in the data folder as `pharmaspending.csv`. Make a subselection of this dataset that contains the data for Belgium and its neigbhouring countries France, Germany and the Netherlands. Furthermore, we're only interested in the data starting from the year 2000.
###Code
# Download data from datahub.io.
pharma = pd.read_csv('data/pharmaspending.csv')
# Extract years of interest
pharma_2000 = pharma[pharma.TIME >= 2000]
# Countries of interest
countries = ['BEL', 'FRA', 'DEU', 'NLD']
pharma = pharma_2000[pharma_2000['LOCATION'].isin(countries)]
# Fix index
pharma.reset_index(drop=True)
# Store dataset
pharma.to_csv('data/pharmaspending_subset.csv')
# Download data from datahub.io.
pharma = pd.read_csv('data/pharmaspending.csv')
# Countries of interest
countries = ['BEL', 'FRA', 'DEU', 'NLD']
# Extract years of interest
pharma_2000 = pharma[pharma.TIME >= 2000]
# Make empty dataframe
sub_pharma = pd.DataFrame()
# Make subselection dataframe with the data of the countries of interest
for country in countries:
sub_pharma = sub_pharma.append(pharma_2000.loc[pharma_2000['LOCATION'] == country], ignore_index=True)
sub_pharma
# Store dataset
sub_pharma.to_csv('data/pharmaspending_subset.csv')
###Output
_____no_output_____
###Markdown
--- Exercise 2.5.5In this exercise, derived from the [GTN](https://galaxyproject.github.io/training-material/topics/transcriptomics/tutorials/rna-seq-viz-with-heatmap2/tutorial.html), we will prepare the data to create a heatmap (*Exercise 3.x.y?*) of the top differentially expressed genes in an RNA-seq counts dataset. - [`counts`](https://zenodo.org/record/2529926/files/limma-voom_normalised_counts)- [`de_genes`](https://zenodo.org/record/2529926/files/limma-voom_luminalpregnant-luminallactate) The latter file contains the results from comparing gene expression in the luminal cells in the pregnant versus lactating mice. It includes genes that are not significantly differentially expressed. We’ll call genes significantly differentially expressed in this dataset if they pass the thresholds of `adjusted P-value < 0.01` and `fold change > 1.5 (log2FC of 0.58)`. Filter the top 20 DE genes from that table and create a joint dataframe that contains only the following columns and looks like this:| SYMBOL_x | MCL1.DG | MCL1.DH | MCL1.DI | MCL1.DJ | MCL1.DK | MCL1.DL | MCL1.LA | MCL1.LB | MCL1.LC | MCL1.LD | MCL1.LE | MCL1.LF ||---------:|---------:|---------:|---------:|---------:|----------:|----------:|---------:|---------:|---------:|---------:|---------:|----------|| Ggt1 | 6.732347 | 6.556047 | 6.558849 | 6.586562 | 6.437596 | 6.394067 | 5.193118 | 5.526432 | 4.223990 | 4.341605 | 7.243899 | 7.354535 || Slc39a4 | 2.722153 | 3.027691 | 2.175532 | 1.993214 | -0.193255 | -0.016902 | 3.071502 | 2.928202 | 6.472918 | 6.526836 | 2.430346 | 1.847241 || Ppl | 5.102274 | 4.900942 | 5.755087 | 5.951023 | 6.851420 | 6.881858 | 7.359977 | 7.732010 | 8.227118 | 8.437499 | 4.646145 | 4.798986 || ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |Save the file as a csv-file in the data-folder.
###Code
import pandas as pd
counts = pd.read_csv('https://zenodo.org/record/2529926/files/limma-voom_normalised_counts', sep='\t')
de_genes = pd.read_csv('https://zenodo.org/record/2529926/files/limma-voom_luminalpregnant-luminallactate', sep='\t')
# Filters
p_adj_lim = 0.01
logFC_lim = 0.58
# Filter the non-significantly differentially expressed genes out
de_genes = de_genes[abs(de_genes['logFC']) > logFC_lim]
de_genes = de_genes[de_genes['P.Value'] < p_adj_lim]
# Sort the remaining significantly expressed genes (highest DE genes on top)
de_genes = de_genes.sort_values('P.Value')
# Filter the top 20 DE genes
de_top20 = de_genes.iloc[0:20, :]
# Create dataframe for heatmap that is a joined dataframe of the two imported data files
df_heatmap = pd.merge(counts, de_top20, on='ENTREZID')
# Make a subselection of the columns (genes and DE, see the df above)
df_heatmap = df_heatmap.iloc[:, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]]
# Set the names of the genes as the row index
df_heatmap = df_heatmap.set_index('SYMBOL_x')
# Store the dataframe in a csv file for later usage.
df_heatmap.to_csv('data/heatmap_data.csv')
df_heatmap
###Output
_____no_output_____
###Markdown
--- Question: Knowing that the dataset contains data for 36 countries from 1971 until 2015, what happened when Seaborn made this plot for us? --- Many seaborn functions can automatically perform the statistical estimation and plot it on your graph. More complex datasets will have multiple measurements for the same value of the x variable. The default behavior in seaborn is to aggregate the multiple measurements at each x value by plotting the mean and the 95% confidence interval around the mean. This is the lineplot of the percentage of GDP for all the countries in this dataframe. How can you know there are 36 countries in this dataset? `len(pharma['LOCATION'].unique())` --- 3.2.3.1 Exercise: Plot the same barplot but only for Belgium vs the Netherlands. Find a barplot argument that selects which countries are plotted (instead of making another subselection of the pandas dataframe).**Extra**: Adjust the figure with error bar caps, your favourite color palette, make it a horizontal barplot, or experiment with any of the other features.
###Code
# Plot barplot with only BEL and NLD
ax = sns.barplot(x = 'LOCATION', y = 'TOTAL_SPEND', data = pharma, order=['BEL', 'NLD'])
# Plot barplot with only BEL and NLD
ax = sns.barplot(x = 'TOTAL_SPEND', y = 'LOCATION', data = pharma, order=['BEL', 'NLD'], palette = "deep", capsize=.1, errwidth=2)
###Output
_____no_output_____
###Markdown
--- 3.2.5.1 ExerciseMimic the graph given below which is extracted from the [tutorial](https://training.galaxyproject.org/training-material/topics/transcriptomics/tutorials/rna-seq-counts-to-viz-in-r/tutorial.html#volcano-plot) and represents the final result. - Use a darkgrid background- Color the dots according to their strand orientation- Add a title, x- and y-labels - Save the figure Extra: figure out how you can:- [Move the legend outside of the plot](https://www.delftstack.com/howto/matplotlib/how-to-place-legend-outside-of-the-plot-in-matplotlib/)- [Remove the upper and right spine of the plots](http://seaborn.pydata.org/generated/seaborn.despine.html)
###Code
# The following will make a grid on a gray-ish background
sns.set(style='darkgrid')
# Make scatterplot with hue set to the Strand.
sns.scatterplot(x='log2(FC)', y='Log10 P-value', data = volc, hue='Strand')
# Some minor modifications: title, x- and y-axis label
plt.xlabel('log2(Fold change)')
plt.ylabel('-log10(P-values)')
plt.title('Differentially expressed genes')
# Legend outside of the plot
plt.legend(bbox_to_anchor=(1, 0.55), title='Strand:')
# Despine literally removes the spines (top and right axis)
sns.despine()
plt.savefig('img/volcano-plot-mimic.png')
###Output
_____no_output_____
###Markdown
--- 3.3.1 Exercise Import and inspect the dataset from [datahub.io](https://datahub.io/core/genome-sequencing-costs) containing the cost of genome sequencing throughout the years (also stored as `data/sequencing_costs.csv`).- Set the style to a white background with ticks on the axes- Set the context to a paper format- Change the figure size- Rename the x- and y-label and title of the plot 
###Code
# 1. Import data
seqcost = pd.read_csv('data/sequencing_costs.csv', sep=',')
# 2. Set style of the plot
sns.set_style("white")
sns.set_context("paper")
# 3. Define/create the plot
plt.figure(figsize=(10,4))
ax = sns.lineplot(x = 'Date', y = 'Cost per Mb', data = seqcost)
# 4. Tweak lay-out
ax.set(xlabel='Years', ylabel='Cost per Mb', title='Cost of sequencing')
plt.setp(ax.get_xticklabels(), rotation=90)
sns.despine()
#plt.savefig('img/seqcost.png')
###Output
_____no_output_____
###Markdown
--- 3.3.2 Extra exerciseMake two subplots underneath each other that plot the Cost per Mb over years and the Total cost. Find more information on subplots [here](https://matplotlib.org/3.1.0/gallery/subplots_axes_and_figures/subplots_demo.html).
###Code
# 1. Import data
seqcost = pd.read_csv('data/sequencing_costs.csv', sep=',')
# 2. Set style of the plot
sns.set_style("ticks")
sns.set_context("paper")
# 3. Define/create the plot
fig, ax = plt.subplots(nrows = 2, ncols = 1, figsize= (10,7))
sns.lineplot(x = 'Date', y = 'Cost per Mb', data = seqcost, color = 'b', ax = ax[0])
sns.lineplot(x = 'Date', y = 'Cost per Genome', data = seqcost, color = 'r', ax = ax[1])
# 4. Tweak lay-out
fig.suptitle('Vertically stacked subplots')
sns.despine()
plt.setp(ax[0].get_xticklabels(), rotation=90)
plt.setp(ax[1].get_xticklabels(), rotation=90)
plt.tight_layout()
#plt.savefig('img/subplots_seqcost.png')
###Output
_____no_output_____
###Markdown
--- 5.2.1 ExerciseCalculate the GC-content in the following sequence:```GATTACCACTCACTGACTCACTGACACGAGACCTATACATGATCGCCGGATGATACGAGAATTACTGACGACTAATCCCGGATACTGCATACACTGACGACGACT```- Use the `.count()` method as shown above- Search through Bio.SeqUtils for a function that might help you
###Code
ex_seq = Seq("GATTACCACTCACTGACTCACTGACACGAGACCTATACATGATCGCCGGATGATACGAGAATTACTGACGACTAATCCCGGATACTGCATACACTGACGACGACT")
# GC content
100*float(ex_seq.count("G")+ex_seq.count("C"))/len(ex_seq)
# Use the built-in method of Bio.SeqUtils
from Bio.SeqUtils import GC
GC(ex_seq)
# the Bio.SeqUtils.GC() function should automatically cope with mixed case
# sequences and the ambiguous nucleotide S which means G or C.
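# A quick illustrative check: GC() also counts the ambiguous base S,
# so "gattaSa" scores 2 G/C-like bases out of 7 (~28.57)
GC("gattaSa")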
###Output
_____no_output_____
###Markdown
5.2.2 Extra exercise- Find all occurrences of the subsequence `TGA` and its positions. `TGA` codes for a stop codon in the translation process. Knowing where it occurs, extract the first subsequence from the sequence. - Calculate the molecular weight of the sequence and of its translation product "ATGGCCATTGTAATGG"
###Code
from Bio.SeqUtils import nt_search
ex_seq = "ATGGATTACCACTCACTGCCTCACTGACACGAGACCTATACATG"
stop_seq = "TGA"
occ_TGA = nt_search(ex_seq, stop_seq)
sub_seq = ex_seq[:occ_TGA[1]]
sub_seq
###Output
_____no_output_____
###Markdown
Note: the nt_search function does not accept Seq objects as input. Admittedly, this is suboptimal, but we have to work with what we get.
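A minimal illustration of the workaround, wrapping a (hypothetical) `Seq` object in `str()` before searching:

```python
from Bio.Seq import Seq
from Bio.SeqUtils import nt_search

my_seq = Seq("ATGTGACCTGA")    # hypothetical example sequence
nt_search(str(my_seq), "TGA")  # -> ['TGA', 3, 8]: the pattern followed by its start positions
```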
###Code
from Bio.SeqUtils import molecular_weight
weightDNA = molecular_weight(Seq(ex_seq))
weightProt = molecular_weight(Seq(sub_seq).translate(), 'protein')
print(f"Molecular weight of DNA: {weightDNA:.2f}\nMolecular weight of Protein: {weightProt:.2f}")
###Output
_____no_output_____
###Markdown
--- 5.3.1 ExerciseCan you concatenate the following sequences (using a `for`-loop or the built-in `sum` function)?- Seq("ACGT")- Seq("GCTA")- Seq("TACG")
###Code
# Method 1
seq1 = Seq("ACGT")
seq2 = Seq("GCTA")
seq3 = Seq("TACG")
list_of_seqs = [seq1, seq2, seq3]
concatenated = Seq('')
for each_seq in list_of_seqs:
concatenated += each_seq
print(concatenated)
concatenated
# Method 2
list_of_seqs = [Seq("ACGT"), Seq("GCTA"),Seq("TACG")]
sum(list_of_seqs, Seq(""))
###Output
_____no_output_____
###Markdown
--- 5.7 ExerciseIdentifying genes is possible by looking for open reading frames (ORFs). For eukaryotic genes we know that there is a complex interaction between promoters, start codons, exons and introns. Nonetheless, for prokaryotic and virus genes this approach would still be useful. Depending on the organism you also need to use the appropriate codon table. In this exercise we're using a bacterial plasmid fasta file for which we need to use codon [table 11](https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi#SG11). Write a function that accepts a DNA sequence and stores the translated sequences in a pandas DataFrame; define the translation table and require that a possible protein has a minimum length of 100 AAs. Input arguments of the function:- `record`: DNA sequence record (`SeqRecord` object)- `strand`: sense or antisense (+1/-1)- `frame`: reading frame (0/1/2)- `table`: translation table (e.g. 11)- `min_len`: minimum length of protein sequences to be included (e.g. 100) The output might look something like this: | | Sequence | Length | Strand | Frame ||--:|--------------------------------------------------:|-------:|-------:|------:|| 0 | WGKLQVIGLSMWMVLFSQRFDDWLNEQEDALQEKVLADLKKLQVYG... | 125 | -1 | 1 || 1 | RGIFMSDTMVVNGSGGVPAFLFSGSTLSSYRPNFEANSITIALPHY... | 361 | -1 | 1 || 2 | WDVKTVTGVLHHPFHLTFSLCPEGATQSGREAHLLAELPQRRMEPV... | 111 | -1 | 1 |
###Code
# Define the arguments and function
def extract_ORF(record, strand, frame, table, min_len):
"""extract_ORF accepts a sequence record object as argument together with a strand orientation
and frameshift and will give you as an output all of the possible ORFs from that sequence record object
that are longer than a predefined minimal length of AAs using a specific codon table"""
# Create empty dataframe that will store all the information
seq_info = pd.DataFrame(columns=["Sequence", "Length", "Strand", "Frame"])
# Change DNA sequence according to strand orientation
if strand == -1: # Antisense strand orientation
record_seq = record.seq.reverse_complement()
elif strand == +1: # Sense strand orientation
record_seq = record.seq
# There are more elegant solutions than this...
    # Shift the sequence according to the chosen reading frame and trim it to a
    # multiple of three so that translate() does not warn about partial codons
    length = 3 * ((len(record)-frame) // 3)
    record_seq = record_seq[frame:frame+length]
# Iterate over each possible translation
for sub_prot in record_seq.translate(table=table).split('*'):
# If the possible translation is longer than min_len, add it to the DataFrame
if len(sub_prot) >= min_len:
seq_info = seq_info.append({'Sequence': str(sub_prot),
'Length': len(sub_prot),
'Strand': strand,
'Frame': frame}, ignore_index = True)
return seq_info
import pandas as pd
from Bio import SeqIO

# Load the plasmid record before calling the function
record = SeqIO.read("data/NC_005816.fna", "fasta")
table = 11
min_pro_len = 100
extract_ORF(record=record, strand=-1, frame=1, table=11, min_len=100)
# Make a for loop to access first the + strand and then the - strand
for strand, nuc in [(+1, record.seq), (-1, record.seq.reverse_complement())]:
# Allow frame shifts
for frame in range(3):
# Length of sequence adjusted for frame shift
length = 3 * ((len(record)-frame) // 3)
# Translate sequence (nuc) to AA's. Split at * which decodes for a stop codon. These are all the ORFs
for pro in nuc[frame:frame+length].translate(table).split("*"):
# if length of ORF is >= predefined length
if len(pro) >= min_pro_len:
# Print results
print(f"{pro[:30]}...{pro[-3:]}, - length {len(pro)}, strand {strand}, frame {frame}")
###Output
_____no_output_____
###Markdown
--- 6.1.1 ExerciseFind the titles of all the articles related to the genbank entry 'NC_005816'. Import this file using the following block of code. Extra: Create a list of URL-links that bring you directly to the articles. For this you can use the Pubmed ID in combination with `https://pubmed.ncbi.nlm.nih.gov/`. Hint: look at the section on *references* of [this link](https://biopython.readthedocs.io/en/latest/chapter_seq_annot.html)
###Code
from Bio import SeqIO
record = SeqIO.read("data/NC_005816.gb","gb")
# First create an empty list that will store the URL-links.
list_pubmed = []
base_url = 'https://pubmed.ncbi.nlm.nih.gov/'
for ref in record.annotations['references']:
    # The title of each article is stored in the reference's title attribute
print(ref.title)
# Extra: if there is a pubmed ID
if ref.pubmed_id:
# Prepare to add pubmed ID together with Base URL
url_link = (base_url, ref.pubmed_id)
# Join the pubmed ID with Base URL and append this to the list of Pubmed ID's
list_pubmed.append(''.join(url_link))
print(list_pubmed)
###Output
_____no_output_____
###Markdown
--- 7.1.1 ExerciseMake a list that contains the organism of each record in the `data/ls_orchid.gbk`-file. Tip: you should make an empty list, iterate over all the records, access the organism and append it to the list.
###Code
from Bio import SeqIO
# Method 1 by using the annotations (cleaner)
all_species= []
for seq_record in SeqIO.parse("data/ls_orchid.gbk","genbank"):
all_species.append(seq_record.annotations["organism"])
print(set(all_species))
# Method 2 by using the description (can be a bit tricky if the name of the organism is not on the second location)
all_species = []
for seq_record in SeqIO.parse("data/ls_orchid.fasta","fasta"):
all_species.append(seq_record.description.split()[1])
print(all_species)
###Output
_____no_output_____
###Markdown
8.3 ExerciseWrite a script that blasts the top 5 overrepresented sequences in a fastq-file. Save the following information in a pandas dataframe: title, e-value and score. Here is a table that is part of the output of a FastQC process. The raw data can be obtained from the zipped folder that is always created as part of the process. This part represents the overrepresented sequences in a fastq file. The file that contains the data is stored under `data/overrepresented_sequences.txt`. ```Sequence Count Percentage Possible SourceGCGCCAGGTTCCACACGAACGTGCGTTCAACGTGACGGGCGAGAGGGCGG 634749 0.9399698125201895 No HitGCCAGGTTCCACACGAACGTGCGTTCAACGTGACGGGCGAGAGGGCGGCC 437871 0.6484224816077345 No HitGGGGACAGTCCGCCCCGCCCCCCACCGGGCCCCGAGAGAGGCGACGGAGG 319343 0.47289996493044484 No HitGGCTTCCTCGGCCCCGGGATTCGGCGAAAGCTGCGGCCGGAGGGCTGTAA 310651 0.4600283926862577 No HitGGGCCTTCCCGGCCGTCCCGGAGCCGGTCGCGGCGCACCGCCACGGTGGA 260086 0.3851490725611636 No HitACGAATGGTTTAGCGCCAGGTTCCACACGAACGTGCGTTCAACGTGACGG 247602 0.3666621066273818 No HitCGGCTTCGTCGGGAGACGCGTGACCGACGGTCCCCCCGGGACCCGACGGC 170383 0.25231213687083787 No Hit...```
###Code
# Imports
import pandas as pd
from Bio.Seq import Seq
from Bio.Blast import NCBIWWW
from Bio.Blast import NCBIXML
###Output
_____no_output_____
###Markdown
Here is an example output: | / | Title | Score | E-value | |------:|--------------------------------------------------:|--------:|-------------:| | 0 | Staphylococcus aureus... | 100.0 | 1.510770e-15 | | 1 | ... | ... | ... |
###Code
# amount of overrepresented sequences it should blast (exercise = 5 but that takes a while)
nr = 2
# The output of the fastQC process has already been filtered a bit
df_overrepr = pd.read_table('data/overrepresented_sequences.txt', sep='\t')
# Create df that will contain results
df_results = pd.DataFrame(columns=['Title','Score','E-value'])
# Blast only the nr most overrepresented sequences
for i in range(nr):
# Extract sequence
seq = df_overrepr['#Sequence'][i]
print(f'Sequence: {seq}, at index: {i}')
# Make Seq for Biopython
seq_blast = Seq(seq)
# blast sequence
print("Blasting")
result_handle = NCBIWWW.qblast("blastn", "nt", seq_blast)
# Parse the blast handle and extract only first record (without writing it to a file first)
blast_record = NCBIXML.read(result_handle)
    # Each BLAST record has a list of descriptions; keep only the top hit
descr = blast_record.descriptions[0]
title = descr.title
score = descr.score
e = descr.e
df_results = df_results.append({'Title': title,
'Score': score,
'E-value': e}, ignore_index=True)
print(df_results)
df_results
###Output
_____no_output_____
###Markdown
9. Two more exercisesThe following two exercises are a bit longer and require a combination of the materials that we learned today (9.1) or dive into the world of proteins (9.2). The choice is yours as to which one might be more relevant. 9.1 Diagnosing Sickle Cell Anemia[This link](https://krother.gitbooks.io/biopython-tutorial/content/sicklecell.html) will bring you to a great example exercise from Kristian Rother that combines all of the things that we learned today. Your goal is to develop an experimental test that reveals whether a patient suffers from the hereditary disease sickle cell anemia. The test for diagnosis should use a restriction enzyme on a patients’ DNA sample. For the test to work, you need to know exactly what genetic difference to test against. In this tutorial, you will use Biopython to find out.The idea is to compare DNA and protein sequences of sickle cell and healthy globin, and to try out different restriction enzymes on them.This tutorial consists of four parts:1. Use the module Bio.Entrez to retrieve DNA and protein sequences from NCBI databases.2. Use the module Bio.SeqIO to read, write, and filter information in sequence files.3. Use the modules Bio.Seq and Bio.SeqRecord to extract exons, transcribe and translate them to protein sequences.4. Use the module re to identify restriction sites. Regular expressions are not part of the course.
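No worked solution is given for this one, but a minimal sketch of part 1 might look like the following; the e-mail address and the accession number are placeholders chosen for illustration, not part of the exercise:

```python
from Bio import Entrez, SeqIO

Entrez.email = "[email protected]"  # NCBI requires a contact address (placeholder)

# Fetch a GenBank record for a globin gene region (accession is an assumption)
handle = Entrez.efetch(db="nucleotide", id="NG_059281", rettype="gb", retmode="text")
record = SeqIO.read(handle, "genbank")
handle.close()

print(record.id, len(record.seq))
```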
###Code
# Check solutions with/at [email protected]
###Output
_____no_output_____
###Markdown
9.2 Protein plotsMake two 3D plots of protein structures using the matplotlib pyplot library. For this you can use a Biopython module to retrieve the protein's PDB data and another one to parse it. 1. The first one of the [human oxyhaemoglobin](https://www.rcsb.org/structure/1hho) chain A.2. The second one with the superposition of chain B on top of chain A.
###Code
# Plot 1
from Bio.PDB import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pdbl = PDBList()
pdbl.retrieve_pdb_file("1HHO")
parser = MMCIFParser()
struct = parser.get_structure("1HHO", "hh/1hho.cif")
# Part 1: Plot a structure
calphas = [res["CA"].get_coord() for res in struct[0]["A"] if "CA" in res]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot([xyz[0] for xyz in calphas],
[xyz[1] for xyz in calphas],
[xyz[2] for xyz in calphas], color='skyblue')
ax.view_init(30, 185)
plt.show()
# Part 2: Plot an aligned pair of structures
chain_A = [res["CA"] for res in struct[0]["A"] if "CA" in res]
chain_B = [res["CA"] for res in struct[0]["B"] if "CA" in res][:141] # Truncate chain B so both atom lists have equal length for the superposition
sup = Superimposer()
sup.set_atoms(chain_A, chain_B)
sup.apply(chain_B)
calphas_A = [atom.get_coord() for atom in chain_A]
calphas_B = [atom.get_coord() for atom in chain_B]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot([xyz[0] for xyz in calphas_A],
[xyz[1] for xyz in calphas_A],
[xyz[2] for xyz in calphas_A], color='green')
ax.plot([xyz[0] for xyz in calphas_B],
[xyz[1] for xyz in calphas_B],
[xyz[2] for xyz in calphas_B], color='red')
ax.view_init(30, 185)
plt.show()
###Output
_____no_output_____
###Markdown
Solutions| Chapter 2 | Chapter 3 | Chapter 4 | Chapter 5 | Chapter 6 | Chapter 7 | Chapter 8 | Chapter 9 ||-------------------------|-------------------------|-------------------------|-------------------------|-------------------------|-------------------------|-------------------------|-------------------------|| [2-1](Exercise-2-1) | [3-1](Exercise-3-1) | [4-1](Exercise-4-1) | [5-1](Exercise-5-1) | [6-1](Exercise-6-1) | [7-1](Exercise-7-1) | [8-1](Exercise-8-1) | [9-1](Exercise-9-1) || [2-2](Exercise-2-2) | [3-2](Exercise-3-2) | [4-2](Exercise-4-2) | [5-2](Exercise-5-2) | [6-2](Exercise-6-2) | [7-2](Exercise-7-2) | [8-2](Exercise-8-2) | [9-2](Exercise-9-2) || [2-3](Exercise-2-3) | [3-3](Exercise-3-3) | [4-3](Exercise-4-3) | [5-3](Exercise-5-3) | [6-3](Exercise-6-3) | [7-3](Exercise-7-3) | [8-3](Exercise-8-3) || [2-4](Exercise-2-4) | [3-4](Exercise-3-4) | [4-4](Exercise-4-4) | [5-4](Exercise-5-4) | [6-4](Exercise-6-4) | [7-4](Exercise-7-4) | [8-4](Exercise-8-4) || [2-5](Exercise-2-5) | [3-5](Exercise-3-5) | [4-5](Exercise-4-5) | [5-5](Exercise-5-5) | [6-5](Exercise-6-5) | | [8-5](Exercise-8-5) || [2-6](Exercise-2-6) | [3-6](Exercise-3-6) | | [5-6](Exercise-5-6) | [6-6](Exercise-6-6) | | || [2-7](Exercise-2-7) | | | [5-7](Exercise-5-7) | [6-7](Exercise-6-7) | | || [2-8](Exercise-2-8) || [2-9](Exercise-2-9) || [2-10](Exercise-2-10) || [2-11](Exercise-2-11) || [2-12](Exercise-2-12) || [2-13](Exercise-2-13) | --- Exercise 2-1What happens when you try to divide by zero?
###Code
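# Dividing by zero raises a ZeroDivisionError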
5 / 0
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 2-2Introduce brackets into this expression to make it evaluate to 5.
###Code
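# With these brackets the grouping becomes (12 - 2) * 0.5 = 5.0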
(8 * 6 / 4 - 2) * 0.5
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 2-3What do you expect this expression to evaluate to?
###Code
3 + 'five'
###Output
_____no_output_____
###Markdown
You will receive an error from the Python interpreter. Something like: `TypeError: unsupported operand type(s) for +: 'int' and 'str'`. When this happens it is important to **read the error message**. It will help you to diagnose and fix the mistake you made. [&9166;](Solutions)--- Exercise 2-4Can you guess what the expression `not False` will evaluate to?
###Code
not False == True
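# Note: == binds tighter than 'not', so this reads as not (False == True) -> True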
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 2-5Make the expression in the following code cell evaluate to `True`. What is the `!=` operator doing? The `!=` operator can be read as "not equal". So modifying the expression to what is below should evaluate to `True`.
###Code
"hello" != "world"
"hello" == "hello"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 2-6Predict the outcome of each of these operations before evaluating the cells to see if you're correct.
###Code
10 >= (5 + 5)
(1 + 2 + 3 + 4) == 10
abs(-22) != 1
"HELLO" == "hello"
len("hello") <= 4
"Guido " + "van Rossum" == 65
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 2-7Write a program where you ask the computer a question and the computer prints out the answer "42".
###Code
input("Ask me a question:")
print(42)
myNumber = float(input("Give me a number:"))
intPart = int(myNumber)
myNumber - intPart
###Output
Give me a number: 5.5
###Markdown
[&9166;](Solutions)--- Exercise 2-8Write a program that computes the area of a circle with radius given by the user.
###Code
from math import pi
radius = float(input("Radius of a circle:"))
pi * (radius ** 2)
###Output
Radius of a circle: 3.14159
###Markdown
[&9166;](Solutions)--- Exercise 2-9Can you compare strings? What does it mean for one string to be less than another string? What does the following expression evaluate to and why?
###Code
"three" > "six"
###Output
_____no_output_____
###Markdown
Strings **can** be compared. It is clear you might want to check if 2 strings are the same, e.g. `"ATGC" == "ATTC"`. Strings are compared **alphabetically**: `'t'` is later in the alphabet than `'s'`, so `'t' > 's'`. [&9166;](Solutions)--- Exercise 2-10You know that the `*` symbol is the operator for multiplication, `+` is addition, `/` is division, etc. Here is a new operator, see if you can find out what it does:* `//` (e.g. `5 // 2`)
###Code
5 // 2 # Integer division (ignores the fractional part of the result)
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 2-11Use the `int()` function to convert fractional numbers (like `3.14159`) into integers.
###Code
int(3.14159)
int(-1.2345)
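# int() truncates toward zero rather than rounding: 3.14159 -> 3 and -1.2345 -> -1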
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 2-12Give the variable `n` a value and display `True` if `n` is even, otherwise display `False`
###Code
n = 5
n % 2 == 0
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 2-13Write a program to ask the user for their name, then print out a greeting using their name. You can use the `input()` and `print()` commands.
###Code
name = input("What is your name")
print("Hello", name)
###Output
What is your name James
###Markdown
[&9166;](Solutions)--- Exercise 3-1How would you test the following function? Write some tests and try to discover a bug.
###Code
def fractional_part(number):
"""Find the fractional part of an input floating point number."""
int_part = int(number) + 1
frac_part = int_part + number
return int_part
# Write your tests here
# Reading the function, I immediately suspect it's returning the integer part. Let's write a test for that hypothesis
assert 5 == fractional_part(5.5)
# No, but it seems to be adding 1 to the integer part. Let's check that:
assert 6 == fractional_part(5.5) # Yes, no output means that assertion was correct. Let's fix the function:
def fractional_part(number):
"""Find the fractional part of an input floating point number."""
int_part = int(number)
frac_part = int_part + number
return frac_part
assert 0.5 == fractional_part(5.5) # No this still fails. What's the fractional part computation doing?
assert 10.5 == fractional_part(5.5) # That succeeds. It's adding rather than subtracting. Let's fix that...
def fractional_part(number):
"""Find the fractional part of an input floating point number."""
int_part = int(number)
frac_part = number - int_part
return frac_part
assert 0 == fractional_part(0)
assert 0.5 == fractional_part(5.5)
assert 0.2 == fractional_part(-8.2)
###Output
_____no_output_____
###Markdown
We're nearly there. Just have to deal with negative values...
###Code
def fractional_part(number):
"""Find the fractional part of an input floating point number."""
int_part = abs(int(number))
frac_part = abs(number) - int_part
return frac_part
assert 0 == fractional_part(0)
assert 0.5 == fractional_part(5.5)
assert 0.2 == fractional_part(-8.2), f"{fractional_part(-8.2)}"
###Output
_____no_output_____
###Markdown
Ah! Remember floating point approximation! Fixing this is left as an exercise. [&9166;](Solutions)--- Exercise 3-2Write a function called `distance` that accepts 2 numbers called `x` and `y` as arguments and computes the euclidean distance of the coordinate $(x,y)$ from the origin $(0,0)$ $$d(x, y) = \sqrt{x^2 + y^2}$$
###Code
from math import sqrt
# Write your function here
def distance(x, y):
"Distance between coordinate (x,y) and origin."
return sqrt(x**2 + y**2)
assert 5 == distance(3, 4), f"The coordinate (3,4) is 5 units from the origin, not {distance(3,4)} units."
assert 13 == distance(5, 12), f"The coordinate (5,12) is 13 units from the origin, not {distance(5,12)} units."
assert 17 == distance(8, 15), f"The coordinate (8,15) is 17 units from the origin, not {distance(8,15)} units."
assert distance.__doc__ is not None, "You should write a docstring to help other use your function."
###Output
_____no_output_____
###Markdown
[&9166;](Solutions) --- Exercise 3-3Write a function called `l2norm` that computes the Euclidean distance between any 2 arbitrary points in 3D space.$$\ell^{2}(\vec{a}, \vec{b}) = \sqrt{\sum_{k=1}^3 (a_k - b_k)^2}$$
###Code
from math import sqrt
def l2norm(x1, y1, z1, x2, y2, z2):
return sqrt((x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2)
assert l2norm(0, 0, 0, 0, 0, 0) == 0, f"Expected 0, got: {l2norm(0, 0, 0, 0, 0, 0)}"
assert l2norm(0, 0, 0, 0, 0, 1) == 1, f"Expected 1, got: {l2norm(0, 0, 0, 0, 0, 1)}"
assert l2norm(0, 0, 0, 0, -1, 0) == 1, f"Expected 1, got: {l2norm(0, 0, 0, 0, -1, 0)}"
assert l2norm(0, 0, 0, 2.2, 0, 0) == 2.2, f"Expected 2.2, got: {l2norm(0, 0, 0, 2.2, 0, 0)}"
assert l2norm(0, 0, -2.2, 0, 0, 0) == 2.2, f"Expected 2.2, got: {l2norm(0, 0, -2.2, 0, 0, 0)}"
assert l2norm(0, 5.1, 0, 0, 0, 0) == 5.1, f"Expected 5.1, got: {l2norm(0, 5.1, 0, 0, 0, 0)}"
assert l2norm(0.2, 0, 0, 0, 0, 0) == 0.2, f"Expected 0.2, got: {l2norm(0.2, 0, 0, 0, 0, 0)}"
assert l2norm(0, 0, 0, 3, 4, 12) == 13.0, f"Expected 13.0, got: {l2norm(0, 0, 0, 3, 4, 12)}"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 3-4Since programs are just functions composed of functions and values, it is time for you to write a non-trivial program.Write a program called `ball_height` that accepts coordinates relative to a thrower (e.g. 5 metres east, and 7.5 metres south) and compute the height of a ball, thrown in their direction, when it reaches them. Hint: use the `ball_trajectory()` and distance functions we've defined above.
###Code
def ball_height(x, y):
"Ball height at coordinate (x,y) after being thrown from the origin."
return ball_trajectory(distance(x, y))
assert abs(ball_height(0.2, 0.9) - 7.5195) < 0.0001, f"Got:{ball_height(0.2, 0.9)}; Expected: 7.5195"
assert abs(ball_height(0.1, 0.75) - 6.4213) < 0.0001, f"Got:{ball_height(0.1, 0.75)}; Expected: 6.4213"
assert abs(ball_height(1, 2.5) - 12.4258) < 0.0001, f"Got:{ball_height(1, 2.5)}; Expected: 12.4258"
assert ball_height.__doc__ is not None, "You should write a docstring to help other use your function."
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 3-5Now that you've seen that functions and values can be composed to build programs lets write a function composing function called `compose()`
###Code
def compose(funA, funB):
"Compose 2 functions that take a single argument into a single function"
def composed(x):
return funA(funB(x))
return composed
assert (compose(str, len)("hello")) == "5" , f"I expected '5', got: {compose(str, len)('hello')}"
assert (compose(sum, list)({1, 3, 5})) == 9, f"I expected 9, got: {compose(sum, list)({1, 3, 5})}"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 3-6Two functions are defined below that operate on text. The first, called `first()`, gets the first chatacter in the string. The second, called `rest()` drops the first character in the string and returns the "rest". Use `compose()` with these 2 functions (`first()` and `rest()`) to write a function to get the second character in any string.
###Code
def first(text):
"Return the first character in the input string"
return text[0]
def rest(text):
"Return the input string without the first character"
return text[1:]
second = compose(first, rest)
assert second("hello") == "e", f"Expected 'e', got: {second('hello')}"
assert second("at") == 't', f"Expected 't', got: {second('at')}"
###Output
_____no_output_____
###Markdown
What happens when you pass a single character to `second()`?
###Code
second('a')
rest('a')
###Output
_____no_output_____
###Markdown
The `rest()` function is called first with the argument 'a'. This returns the empty string `''` from which `first()` cannot get the first character. [&9166;](Solutions)--- Exercise 4-1Write a function called `fizzbuzz1()` that returns `"Fizz"` when its argument is a multiple of 3, `"Buzz"` when it is a multiple of 5, and `"Fizz Buzz"` when it is a multiple of both.
###Code
def fizzbuzz1(number):
if number % 3 == 0 and number % 5 == 0:
return "Fizz Buzz"
if number % 3 == 0:
return "Fizz"
if number % 5 == 0:
return "Buzz"
print(fizzbuzz1(33)) # Should print "Fizz"
print(fizzbuzz1(30)) # Should print "Fizz Buzz"
print(fizzbuzz1(20)) # Should print "Buzz"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 4-2Can you spot (and fix!) the bug caused by incorrect grouping of code in the following snippet? What should the `clamp_0_10()` function do? If value is lower than 0 (e.g. -2 or -7), it is overwritten by the value 0; if value is higher than 10 (e.g. 132 or 17), it is overwritten by the value 10; otherwise, if value is between 0 and 10, it is returned unchanged.
###Code
def clamp_0_10(value):
"Bound value between 0 and 10 inclusive"
minimum_value = 0
maximum_value = 10
if value < minimum_value:
print(f"{value} is too small")
value = minimum_value
if value > maximum_value:
print(f"{value} is too big")
value = maximum_value
return value
print(f"Input: -5, expected: 0, got: {clamp_0_10(-5)}")
print(f"Input: 0, expected: 0, got: {clamp_0_10(0)}")
print(f"Input: 5, expected: 5, got: {clamp_0_10(5)}")
print(f"Input: 10, expected: 10, got: {clamp_0_10(10)}")
print(f"Input: 11, expected: 10, got: {clamp_0_10(11)}")
###Output
-5 is too small
Input: -5, expected: 0, got: 0
Input: 0, expected: 0, got: 0
Input: 5, expected: 5, got: 5
Input: 10, expected: 10, got: 10
11 is too big
Input: 11, expected: 10, got: 10
###Markdown
[&9166;](Solutions)--- Exercise 4-3Write a program to compute division that checks for division by zero. Complete your code in the template provided below by replacing the `_` characters.
###Code
def division(x, y):
if y == 0:
return "You cannot divide by zero"
return x / y
print(division(1, 0)) # Should print "You cannot divide by zero"
print(division(1, 1)) # Should print 1.0
###Output
You cannot divide by zero
1.0
###Markdown
[&9166;](Solutions)--- Exercise 4-4Write a function that returns "odd" when its input is odd, and "even" otherwise. Also add you own test to ensure your answer is correct.
###Code
def even_or_odd(num):
if (num % 2) != 0:
return "odd"
return "even"
assert even_or_odd(1) == "odd"
assert even_or_odd(2) == "even"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 4-5Extend the fizzbuzz function you wrote earlier by returning the input number if it doesn't match any of the conditions.A reminder, if `number`* is a multiple of `3`, return `"Fizz"`* is a multiple of `5`, return `"Buzz"`* is a multiple of `3` **and** `5`, return `"Fizz Buzz"`* otherwise, return numberUse the template provided below, replace the `_` characters.
###Code
def fizzbuzz2(number):
if number % 3 == 0 and number % 5 == 0:
return "Fizz Buzz"
elif number % 3 == 0:
return "Fizz"
elif number % 5 == 0:
return "Buzz"
else:
return number
print(fizzbuzz2(33)) # Should print "Fizz"
print(fizzbuzz2(30)) # Should print "Fizz Buzz"
print(fizzbuzz2(20)) # Should print "Buzz"
print(fizzbuzz2(16)) # Should print 16
###Output
Fizz
Fizz Buzz
Buzz
16
###Markdown
[&9166;](Solutions)--- Exercise 5-1Write a function called `fizzbuzz3()` that returns a list containing the FizzBuzz game played up to 15.
###Code
def fizzbuzz3():
"Play the Fizz Buzz game up to 15."
return [1, 2, "Fizz", 4, "Buzz", "Fizz", 7, 8, "Fizz", "Buzz", 11, "Fizz", 13, 14, "Fizz Buzz"]
fizzbuzz3()
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 5-2Write a function that accepts a nested list as an argument and returns the original list with second element reversed. For example, given input `[[1, 2], [3, 4]]`, return `[[1, 2], [4, 3]]`.
###Code
def reverse_second(arg):
    "Reverse the second element of a nested list."
    if len(arg) >= 2:
        arg[1] = arg[1][::-1]
    return arg
assert reverse_second([[1]]) == [[1]], f"Expected [[1]], got: {reverse_second([[1]])}"
assert reverse_second([[1], [2]]) == [[1], [2]], f"Expected [[1], [2]], got {reverse_second([[1], [2]])}"
assert reverse_second([[1, 5], [10, 9, 8]]) == [[1, 5], [8, 9, 10]], f"Expected [[1, 5], [8, 9, 10]], got: {reverse_second([[1, 5], [10, 9, 8]])}"
assert reverse_second([1, ['h', 'e', 'l', 'l', 'o'], 2]) == [1, ['o', 'l', 'l', 'e', 'h'], 2], f"Expected [1, ['o', 'l', 'l', 'e', 'h'], 2], got: {reverse_second([1, ['h', 'e', 'l', 'l', 'o'], 2])}"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 5-3Slicing presents a possible solution to playing the FizzBuzz game: slice starting at the 3$^{rd}$ element of a list with a `step` of 3, assign an appropriately sized list of `["Fizz"]` strings. The same for 5. Write a function, called `fizzbuzz4()` that attempts to play the Fizz Buzz game in this way. The input will be a list containing the counted numbers, you should use slicing to replace the appropriate numbers with `"Fizz"` or `"Buzz"`.
###Code
def fizzbuzz4(counted):
counted[2::3] = ["Fizz"] * (len(counted) // 3)
counted[4::5] = ["Buzz"] * (len(counted) // 5)
return counted
fizzbuzz4([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
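# Note the flaw: 15 is a multiple of both 3 and 5, but the second slice
# assignment overwrites it with "Buzz" instead of "Fizz Buzz"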
###Output
_____no_output_____
###Markdown
Does `fizzbuzz4()` correctly play the Fizz Buzz game? What is wrong with this solution? No: the case where a number is a multiple of `3` **and** of `5` is not handled correctly. [&9166;](Solutions)--- Exercise 5-4Write a function that accepts a list of scores (highest is better) and returns the score of third place (bronze medal).
###Code
def bronze_medal(scores):
return sorted(scores, reverse=True)[2]
assert bronze_medal([54,56,2,1,5223,6,23,57,3,7,3344]) == 57, f"Expected 57, got: {bronze_medal([54,56,2,1,5223,6,23,57,3,7,3344])}"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 5-5You're given a list containing some data. Each element of the list is another list containing 2 values: the name of a country, and the average number of citations per citable document produced within that country. Write a function that takes this list as an argument and returns the name of the country with the second highest number of average citations.
###Code
data = [['Netherlands Antilles', 38.46],
['Tokelau', 51.9],
['Seychelles', 33.56],
['Anguilla', 133.98],
['Saint Lucia', 37.31],
['Panama', 37.87],
['Bermuda', 43.33],
['Federated States of Micronesia', 85.49],
['Gambia', 42.14],
['Belize', 41.59]]
from operator import itemgetter
def second_highest_citations(countries):
return sorted(countries, key=itemgetter(1), reverse=True)[1][0]
second_highest_citations(data)
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 5-6Write a function that takes 2 parameters: a string to truncate (`text`), and a maximum length (`max_len`) that checks if `text` is longer than `max_len`; if it is, the string should be truncated and display ellipses ("...") as the last 3 characters. The truncated string + the ellipses should fit within `max_len`.
###Code
def truncate(text, max_len):
if len(text) > max_len:
return text[:max_len-3] + "..."
return text
assert truncate("hello world", 10) == "hello w...", f"Expected 'hello w...', got: {truncate('hello world', 10)}"
assert truncate('python', 6) == 'python', f"Expected 'python', got: {truncate('python', 6)}"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 5-7Write a function that takes a collection (a string, list, or tuple) and sorts only the values excluding the first and last elements (so sort the middle elements), then returns the same type as the input with this structure:* A list should look like: `[first_element, ...sorted middle.., last_element]`* A tuple should look like: `(first_element, ...sorted middle.., last_element)`* A string should look like: `"first_character ...sorted middle... last_character"`* Otherwise (if the argument is not a list, tuple, or string) just return the argument unmodified.Fill in the template below. The string version has been completed for you:
###Code
def sorted_middle(collection):
"Sort a collection excluding the first and last elements"
if type(collection) == str:
return collection[0] + (''.join(sorted(collection[1:-1]))) + collection[-1]
if type(collection) == tuple:
return (collection[0],) + tuple(sorted(collection[1:-1])) + (collection[-1],)
if type(collection) == list:
return [collection[0]] + sorted(collection[1:-1]) + [collection[-1]]
return collection
assert sorted_middle("hello") == "hello", f"Expected 'hello', got: {sorted_middle('hello')}"
assert sorted_middle('sequence') == 'sceenque', f"Expected 'hello', got: {sorted_middle('sequence')}"
assert sorted_middle(["h", "e", "l", "l", "o"]) == ["h", "e", "l", "l", "o"], f"Expected ['h', 'e', 'l', 'l', 'o'], got: {sorted_middle(['h', 'e', 'l', 'l', 'o'])}"
assert sorted_middle(['s', 'e', 'q', 'u', 'e', 'n', 'c', 'e']) == ['s', 'c', 'e', 'e', 'n', 'q', 'u', 'e'], f"Expected ['s', 'c', 'e', 'e', 'n', 'q', 'u', 'e'], got: {sorted_middle(['s', 'e', 'q', 'u', 'e', 'n', 'c', 'e'])}"
assert sorted_middle(("h", "e", "l", "l", "o")) == ("h", "e", "l", "l", "o"), f"Expected ('h', 'e', 'l', 'l', 'o'), got: {sorted_middle(('h', 'e', 'l', 'l', 'o'))}"
assert sorted_middle(('s', 'e', 'q', 'u', 'e', 'n', 'c', 'e')) == ('s', 'c', 'e', 'e', 'n', 'q', 'u', 'e'), f"Expected ('s', 'c', 'e', 'e', 'n', 'q', 'u', 'e'), got: {sorted_middle(('s', 'e', 'q', 'u', 'e', 'n', 'c', 'e'))}"
assert sorted_middle(5) == 5, f"Expected 5, got: {sorted_middle(5)}"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 6-1Write a function to play the fizzbuzz game up to `15`. You function should accept no arguments and return a list starting at `1` that tracks the progress of a game of Fizz Buzz.That is, your function should use a loop to produce this list: `[1, 2, 'Fizz', 4, 'Buzz', 'Fizz', 7, 8, 'Fizz', 'Buzz', 11, 'Fizz', 13, 14, 'Fizz Buzz']`
###Code
def fizzbuzz5():
game = []
for number in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]:
if number % 5 == 0 and number % 3 == 0:
game = game + ["Fizz Buzz"]
elif number % 5 == 0:
game = game + ["Buzz"]
elif number % 3 == 0:
game = game + ["Fizz"]
else:
game = game + [number]
return game
assert fizzbuzz5() == [1, 2, 'Fizz', 4, 'Buzz', 'Fizz', 7, 8, 'Fizz', 'Buzz', 11, 'Fizz', 13, 14, 'Fizz Buzz'], "Output is not correct :("
fizzbuzz5()
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 6-2Finally, we have all of the tools we need to write a concise program to play Fizz Buzz. Write a function called `fizzbuzz6` that takes an integer argument which is the number to count up to and returns a list starting at 1 tracking the progress of playing the Fizz Buzz game.
###Code
def fizzbuzz6(end):
"Play Fizz Buzz"
game = []
for count in range(1, end+1):
if count % 5 == 0 and count % 3 == 0:
game = game + ["Fizz Buzz"]
elif count % 5 == 0:
game = game + ["Buzz"]
elif count % 3 == 0:
game = game + ["Fizz"]
else:
game = game + [count]
return game
assert fizzbuzz6(0) == []
assert fizzbuzz6(2) == [1, 2]
assert fizzbuzz6(5) == [1, 2, 'Fizz', 4, 'Buzz']
assert fizzbuzz6(15) == [1, 2, 'Fizz', 4, 'Buzz', 'Fizz', 7, 8, 'Fizz', 'Buzz', 11, 'Fizz', 13, 14, 'Fizz Buzz']
fizzbuzz6(30)
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 6-3Write a function that computes the factorial of its argument.
###Code
def myfactorial(number):
factorial = 1
for i in range(1, number + 1):
factorial = factorial * i
return factorial
assert myfactorial(0) == 1, f"Expected 1, got: {myfactorial(0)}"
assert myfactorial(1) == 1, f"Expected 1, got: {myfactorial(1)}"
assert myfactorial(2) == 2, f"Expected 2, got: {myfactorial(2)}"
assert myfactorial(5) == 120, f"Expected 120, got: {myfactorial(5)}"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 6-4Below is the definition of a function to sum numbers from 1 to the argument. You have 2 tasks:1. Identify and fix the bug(s) using techniques you have explored so far.1. Find the Python standard library function that does the same or similar thing (Hint: you can look through [this list](https://docs.python.org/3/library/functions.html)).
###Code
def mysum(end):
"Sum numbers [1-end] inclusive"
total = 0
for number in range(1, end + 1): #Change range() parameters
total += number # Use +=
return total
assert mysum(1) == sum(range(1, 2))
assert mysum(10) == sum(range(1, 11))
# The standard library equivalent, using the built-in sum() with range()
def mysum(end):
    return sum(range(1, end + 1))
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 6-5Once again, below is the definition of a function that takes a list of strings and joins them into a single string with comma separators.Here are 2 examples of the expected output:```python>>> myjoin(["hello", "world"])"hello,world">>> myjoin(["sample1", "0.5"])"sample1,0.5"```Your task is to identify and fix the bug(s) using techniques you have explored so far. The Python function to do this looks like `','.join(strings)`.
###Code
def myjoin(strings):
joined = ""
for string in strings[:-1]:
joined += string + ","
return joined + strings[-1]
assert myjoin(["hello", "world"]) == "hello,world"
assert myjoin(["sample1", "0.5"]) == "sample1,0.5"
# The built-in way to do the same thing:
','.join(["hello", "world"])
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 6-6The Hamming distance between two strings of equal length is the number of positions at which the corresponding characters are different. In a more general context, the Hamming distance is one of several string metrics for measuring the _distance_ between two sequences. For example, the Hamming distance between "karolin" and "kathrin" is 3.Write a function called "hamming_distance" which accepts two strings and returns the calculated hamming distance. If the lengths of the two strings are unequal, return the value `None`.
###Code
def hamming_distance(string1, string2):
    """Return the Hamming distance between equal-length sequences."""
    # The distance is only defined for strings of equal length
    if len(string1) != len(string2):
        return None
    # Start with a distance of zero, and count up
    distance = 0
    for s1, s2 in zip(string1, string2):
        distance += (s1 != s2)  # booleans count as 1 (different) or 0 (same)
    return distance
seq1 = "GATCATAGA"
seq2 = "CATCATACA"
print(hamming_distance(seq1,seq2))
###Output
2
###Markdown
[&9166;](Solutions)--- Exercise 6-7You're given a list containing some names and a corresponding list containing their rank by popularity.Write a function that takes a name as its argument, the behaviour of the function depends on 3 conditions:* If the name is in the `names` list and in the top 20 most popular names: Return a 2-tuple containing the name and rank (E.g. if the argument is "Elise", your function should return `("Elise", 14)`)* If the name is in the `names` list but not in the top 20 most popular names: Return the name reversed (E.g. if the argument is "Alexander", your function should return "rednaxelA")* Otherwise, return the name followed by the string `" is great at Python!"` (E.g. if the argument is "Hannah", your function should return "Hannah is great at Python!")
###Code
names = ["Marie", "Lucas", "Viktor", "Elise", "Lotte", "Hugo", "Emma", "Elena", "Julia", "Maxime", "Alexander", "Tuur", "Nina", "James"]
ranks = [7, 6, 65, 14, 46, 15, 1, 7, 26, 37, 36, 73, 25, 87]
def baby_name(name):
if name not in names:
return name + " is great at Python!"
for baby, rank in zip(names, ranks):
if baby == name:
if rank <= 20:
return (name, rank)
else:
return name[::-1]
assert baby_name("Elise") == ('Elise', 14), f"Expected ('Elise', 14), got: {baby_name('Elise')}"
assert baby_name("Alexander") == 'rednaxelA', f" Expected 'rednaxelA', got: {baby_name('Alexander')}"
assert baby_name("Hannah") == "Hannah is great at Python!", f"Expected 'Hannah is great at Python!', got: {baby_name('Hannah')}"
assert baby_name("Elena") == ('Elena', 7), f"Expected ('Elena', 7), got: {baby_name('Elena')}"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 7-1Construct a dictionary containing the keys, "BEL", "RUS', "AUS", and "GLO" and decimal numbers for values (You can pick any number you like).**This is one of many valid solutions**
###Code
{
"BEL": 16.2,
"RUS": -9.1,
"AUS": 37.8,
"GLO": 22.4
}
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 7-2Write a function that takes a sequence of amino-acids represented by their single letter code. Return a list of three-letter-codes. For example, given the input: `"EIKGGQ"` return `['Glu', 'Ile', 'Lys', 'Gly', 'Gly', 'Gln']`
###Code
# You should already have this dictionary defined
three_letter_codes = {
'A': 'Ala',
'C': 'Cys',
'D': 'Asp',
'E': 'Glu',
'F': 'Phe',
'G': 'Gly',
'H': 'His',
'I': 'Ile',
'K': 'Lys',
'L': 'Leu',
'M': 'Met',
'N': 'Asn',
'P': 'Pro',
'Q': 'Gln',
'R': 'Arg',
'S': 'Ser',
'T': 'Thr',
'V': 'Val',
'W': 'Trp',
'Y': 'Tyr'}
def one_to_three_letter_code(sequence):
result = []
for aa in sequence:
result += [three_letter_codes[aa]]
return result
assert one_to_three_letter_code("EIKGGQ") == ['Glu', 'Ile', 'Lys', 'Gly', 'Gly', 'Gln'], f"Did not expect {one_to_three_letter_code('EIKGGQ')}"
assert one_to_three_letter_code("PRAPY") == ['Pro', 'Arg', 'Ala', 'Pro', 'Tyr'], f"Did not expect {one_to_three_letter_code('PRAPY')}"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 7-3Write a function that accepts an amino-acid sequence and returns a dictionary keyed by the amino-acid single-letter-code and the value is the frequency of observation of that amino-acid in the sequence.For example, `frequencies("AACD")` should return this dictionary: `{"A": 2, "C": 1, "D": 1}`.
###Code
def frequencies1(sequence):
counts = {}
for residue in sequence:
if residue in counts:
counts[residue] += 1
else:
counts[residue] = 1
return counts
sequence = "SFTMHGTPVVNQVKVLTESNRISHHKILAIVGTAESNSEHPLGTAITKYCKQELDTETLGTCIDFQVVPGCGISCKVTNIEGLLHKNNWNIEDNNIKNASLVQIDASNEQSSTSSSMIIDAQISNALNAQQYKVLIGNREWMIRNGLVINNDVNDFMTEHERKGRTAVLVAVDDELCGLIAIADT"
expected = {'A': 12, 'C': 5, 'D': 10, 'E': 12, 'F': 3, 'G': 11, 'H': 6, 'I': 18, 'K': 9, 'L': 14, 'M': 4, 'N': 18, 'P': 3, 'Q': 8, 'R': 5, 'S': 14, 'T': 14, 'V': 15, 'W': 2, 'Y': 2}
assert frequencies1("AACD") == {"A": 2, "C": 1, "D": 1}, f"Unexpected {frequencies1('AACD')}"
assert frequencies1(sequence) == expected, f"Unexpected {frequencies1(sequence)}"
# Another possible solution
from collections import Counter
def frequencies2(sequence):
return dict(Counter(sequence))
assert frequencies2("AACD") == {"A": 2, "C": 1, "D": 1}, f"Unexpected {frequencies2('AACD')}"
assert frequencies2(sequence) == expected, f"Unexpected {frequencies2(sequence)}"
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 7-4Write a function that accepts a list of observations (See the dictionary you created in Exercise 7-1) and returns a dictionary with the same structure but the values are the averages of the input. For example, consider these input observations:```python[ {"BEL": 5.7, "RUS": -12.8, "AUS": 42.6, "GLO": 22.1}, {"BEL": 10.2, "RUS": -2.0, "AUS": 37.8, "GLO": 21.2}, {"BEL": 14.7, "RUS": 1.3, "AUS": 18.3, "GLO": 23.6}]```The output of your function should be:```python{"BEL": 10.2, "RUS": -4.5, "AUS": 32.9, "GLO": 22.3}```
###Code
def average_temperature(observations):
temp_sum = {"BEL": 0, "RUS": 0, "AUS": 0, "GLO": 0}
for observation in observations:
temp_sum["BEL"] += observation["BEL"]
temp_sum["RUS"] += observation["RUS"]
temp_sum["AUS"] += observation["AUS"]
temp_sum["GLO"] += observation["GLO"]
return {"BEL": temp_sum["BEL"] / len(observations),
"RUS": temp_sum["RUS"] / len(observations),
"AUS": temp_sum["AUS"] / len(observations),
"GLO": temp_sum["GLO"] / len(observations)
}
example = [
{"BEL": 5.7, "RUS": -12.8, "AUS": 42.6, "GLO": 22.1},
{"BEL": 10.2, "RUS": -2.0, "AUS": 37.8, "GLO": 21.2},
{"BEL": 14.7, "RUS": 1.3, "AUS": 18.3, "GLO": 23.6}
]
print(average_temperature(example))
assert average_temperature(example) == {"BEL": 10.2, "RUS": -4.5, "AUS": 32.9, "GLO": 22.3}, f"Got {average_temperature(example)}"
###Output
{'BEL': 10.2, 'RUS': -4.5, 'AUS': 32.9, 'GLO': 22.3}
###Markdown
[&9166;](Solutions)--- Exercise 8-1Write a function to _clean_ the data we read from the file. The input to your function will be a dictionary like this:```python{ 'Time': '1901-05', 'Anomaly (deg C)': '-0.24934465', 'Lower confidence limit (2.5%)': '-0.44425672', 'Upper confidence limit (97.5%)': '-0.05443258'}```The output of your function should look like this:```python{ 'Time': datetime.datetime(1901, 5, 1, 0, 0), 'Temperature': -0.24934465}```
###Code
from datetime import datetime

def clean_global_data(reading):
"Clean a global data dictionary"
return {
'Time': datetime.strptime(reading['Time'], "%Y-%m"),
'Temperature': float(reading['Anomaly (deg C)'])
}
assert clean_global_data({'Time': '1901-05','Anomaly (deg C)': '-0.24934465','Lower confidence limit (2.5%)': '-0.44425672','Upper confidence limit (97.5%)': '-0.05443258'}) == {'Time': datetime(1901, 5, 1, 0, 0),'Temperature': -0.24934465}
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 8-2Write a function that reads the global temperature data file and returns a list of cleaned dictionaries.
###Code
import csv

def global_data(filename):
"Read global data into a list of cleaned dictionaries."
data = []
with open(filename) as global_data_manager:
reader = csv.DictReader(global_data_manager)
for reading in reader:
data += [clean_global_data(reading)]
return data
assert global_data("data/HadCRUT.5.0.1.0.analysis.summary_series.global.monthly.csv")[4] == {'Time': datetime(1901, 5, 1, 0, 0),'Temperature': -0.24934465}
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 8-3Write a function that takes a list of dictionaries as an argument and saves them to a CSV formatted file.
###Code
def save_data(data, filename):
"Save cleaned climate data into a CSV file"
with open(filename, mode="w") as file_resource:
writer = csv.DictWriter(file_resource, fieldnames=["Time", "Temperature"])
writer.writeheader()
writer.writerows(data)
glo = global_data("data/HadCRUT.5.0.1.0.analysis.summary_series.global.monthly.csv")
save_data(glo, "data/global_data.csv")
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 8-4Write a function to clean the country data we read from the web. The input to your function will be a dictionary like this:```python{ 'Temperature - (Celsius)': '5.76', 'Year': '1901', 'Statistics': 'May Average', 'Country': 'Belgium', 'ISO3': 'BEL'}```The output of your function should look like this:```python{ 'Time': datetime.datetime(1901, 5, 1, 0, 0), 'Temperature': 5.76}```
###Code
def clean_country_data(data):
"Clean a country data dictionary"
return {
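        # 'Statistics' looks like 'May Average'; its first three letters give the month abbreviation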
'Time': datetime.strptime(data['Year'] + '-' + data['Statistics'][:3], "%Y-%b"),
'Temperature': float(data['Temperature - (Celsius)'])
}
assert clean_country_data({'Temperature - (Celsius)': '5.76', 'Year': '1901', 'Statistics': 'May Average', 'Country': 'Belgium', 'ISO3': 'BEL'}) == {'Time': datetime(1901, 5, 1, 0, 0), 'Temperature': 5.76}
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 8-5: Load country dataWrite a function that accepts a URL and returns a list of dictionaries containing 'Time' and 'Temperature' keys. Use the second code cell to save the cleaned data using the `save_data()` function you wrote earlier.
###Code
from urllib import request

def country_data(url):
"Read country data from a URL into a list of cleaned dictionaries."
data = []
with request.urlopen(url) as ctry_data_manager:
text_data = ctry_data_manager.read().decode().splitlines()
reader = csv.DictReader(text_data, skipinitialspace=True)
for reading in reader:
data += [clean_country_data(reading)]
return data
assert country_data("https://climateknowledgeportal.worldbank.org/api/data/get-download-data/historical/tas/1901-2020/BEL/Belgium")[4] == {'Time': datetime(1901, 5, 1, 0, 0),'Temperature': 9.15}
assert country_data("https://climateknowledgeportal.worldbank.org/api/data/get-download-data/historical/tas/1901-2020/RUS/Russia")[4] == {'Time': datetime(1901, 5, 1, 0, 0),'Temperature': 2.01}
assert country_data("https://climateknowledgeportal.worldbank.org/api/data/get-download-data/historical/tas/1901-2020/AUS/Australia")[4] == {'Time': datetime(1901, 5, 1, 0, 0),'Temperature': 18.02}
save_data(country_data("https://climateknowledgeportal.worldbank.org/api/data/get-download-data/historical/tas/1901-2020/BEL/Belgium"), "data/bel_data.csv")
save_data(country_data("https://climateknowledgeportal.worldbank.org/api/data/get-download-data/historical/tas/1901-2020/RUS/Russia"), "data/rus_data.csv")
save_data(country_data("https://climateknowledgeportal.worldbank.org/api/data/get-download-data/historical/tas/1901-2020/AUS/Australia"), "data/aus_data.csv")
###Output
_____no_output_____
###Markdown
[&9166;](Solutions)--- Exercise 9-1Check that the code below is correct, then generate a plot with the smoothed data overlaid on the unsmoothed data.
###Code
import matplotlib.pyplot as plt

def moving_average(data, window_size):
smoothed = []
for window_begin in range(len(data) - window_size + 1):
temperatures = []
for index in range(window_begin, window_begin + window_size):
temperatures += [data[index]['Temperature']]
avg = sum(temperatures) / window_size
smoothed += [{
'Temperature': avg,
'Time': data[window_begin + (window_size // 2)]['Time']
}]
return smoothed
# Smooth the cleaned global series (glo was loaded in Exercise 8-3)
glo_average = moving_average(glo, 24)

# Extract plottable lists for the raw and the smoothed series
dates = []
temperatures = []
for reading in glo:
    dates += [reading['Time']]
    temperatures += [reading['Temperature']]

glo_avg_dates = []
glo_avg_temperatures = []
for reading in glo_average:
    glo_avg_dates += [reading['Time']]
    glo_avg_temperatures += [reading['Temperature']]
plt.figure()
plt.plot(dates, temperatures, color='lightblue', linewidth=3)
plt.plot(glo_avg_dates, glo_avg_temperatures, color='darkblue', linewidth=3)
plt.title("Global climate anomaly relative to 1960-1991 reference")
plt.xlabel("Time")
plt.ylabel("Temperature anomaly")
###Output
_____no_output_____
###Markdown
 [&9166;](Solutions)--- Exercise 9-2Create plots like you did above for each country. Remember that functions can save you for repeating work.
###Code
def extract_dates_and_temps(data):
dates = []
temperatures = []
for reading in data:
dates += [reading['Time']]
temperatures += [reading['Temperature']]
return (dates, temperatures)
def make_plot(title, y_label, data):
(dates, temps) = extract_dates_and_temps(data)
(avg_dates, avg_temps) = extract_dates_and_temps(moving_average(data, 24))
plt.figure()
plt.plot(dates, temps, color='lightblue', linewidth=3)
plt.plot(avg_dates, avg_temps, color='darkblue', linewidth=3)
plt.title(title)
plt.xlabel("Time")
plt.ylabel(y_label)
plt.show()
make_plot("Global climate anomaly relative to 1960-1991 reference", "Temperature anomaly", global_data)
###Output
_____no_output_____
###Markdown

###Code
make_plot("Belgium temperature", "Temperature", bel_data)
###Output
_____no_output_____
###Markdown

###Code
make_plot("Russia temperature", "Temperature", rus_data)
###Output
_____no_output_____
###Markdown

###Code
make_plot("Australia temperature", "Temperature", aus_data)
###Output
_____no_output_____
###Markdown
HubSpot - Get Task **Tags:** hubspot sales crm engagements task snippet json **Author:** [Alok Chilka](https://www.linkedin.com/in/calok64/) Input Import libraries
###Code
from datetime import datetime, timedelta
import requests, math
import json
###Output
_____no_output_____
###Markdown
Setup your HubSpot👉 Access your [HubSpot API key](https://knowledge.hubspot.com/integrations/how-do-i-get-my-hubspot-api-key)
###Code
HS_API_TOKEN = "YOUR_HUBSPOT_API_KEY"
###Output
_____no_output_____
###Markdown
Setup your task info
###Code
contact_id = 1551
owner_id = 111111086
# Time delay to get tasks created in the last N days, e.g. N = 1 for tasks created since yesterday
time_delay = 10
#Number of tasks to be retrieved
no_of_tasks = 10
###Output
_____no_output_____
###Markdown
Model Function to get recent tasks
###Code
def get_task(contact_id,owner_id,time_delay,no_of_tasks):
"""
Engagement type = TASK
"""
# Calc timestamp
Previous_Date = datetime.now() - timedelta(days=time_delay)
Previous_tstamp = Previous_Date.timestamp() * 1000
Previous_tstamp = math.trunc(Previous_tstamp)
url = "https://api.hubapi.com/engagements/v1/engagements/recent/modified"
params = {"hapikey": HS_API_TOKEN,"since":Previous_tstamp,"count":no_of_tasks}
headers = {'Content-Type': "application/json"}
# Post requests
res = requests.get(url,headers=headers,params=params)
if res.status_code == 200:
res_json = res.json()
# Check requests
try:
res.raise_for_status()
except requests.HTTPError as e:
raise (e)
res_json = res.json()
return res_json
else:
print("Task not found")
###Output
_____no_output_____
###Markdown
Output Get Recent task
###Code
results = get_task(contact_id,owner_id,time_delay,no_of_tasks)
for key in results["results"]:
print("---------------")
print(key['engagement']['id'])
###Output
_____no_output_____
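###Markdown
Note that the `recent/modified` endpoint returns recently modified engagements of every type, not just tasks. Below is a minimal sketch of filtering the results down to tasks, assuming each engagement in the response carries a `type` field as HubSpot's v1 engagement payloads do:
###Code
task_ids = []
for key in results["results"]:
    # keep only engagements whose type is TASK
    if key["engagement"].get("type") == "TASK":
        task_ids += [key["engagement"]["id"]]
print(task_ids)
###Output
_____no_output_____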
notebooks/general/Explaining Quantitative Measures of Fairness.ipynb | ###Markdown
Explaining Measures of Fairness with SHAPThis hands-on article connects explainable AI methods with fairness measures and shows how modern explainability methods can enhance the usefulness of quantitative fairness metrics. By using [SHAP](http://github.com/slundberg/shap) (a popular explainable AI tool) we can decompose measures of fairness and allocate responsibility for any observed disparity among each of the model's input features. Explaining these quantitative fairness metrics can reduce the concerning tendency to rely on them as opaque standards of fairness, and instead promote their informed use as tools for understanding how model behavior differs between groups.Quantitative fairness metrics seek to bring mathematical precision to the definition of fairness in machine learning [[1](https://books.google.com/books/about/The_Ethical_Algorithm.html?id=QmmtDwAAQBAJ&source=kp_book_description)]. Definitions of fairness, however, are deeply rooted in human ethical principles, and so rest on value judgements that often depend critically on the context in which a machine learning model is being used. This practical dependence on value judgements manifests itself in the mathematics of quantitative fairness measures as a set of trade-offs between sometimes mutually incompatible definitions of fairness [[2](https://arxiv.org/abs/1609.05807)]. Since fairness relies on context-dependent value judgements, it is dangerous to treat quantitative fairness metrics as opaque black-box measures of fairness: doing so may obscure important value judgement choices. How SHAP can be used to explain various measures of model fairnessThis article is not about how to choose the "correct" measure of model fairness, but rather about explaining whichever metric you have chosen. Which fairness metric is most appropriate depends on the specifics of your context, such as what laws apply, how the output of the machine learning model impacts people, and what value you place on various outcomes and hence tradeoffs. Here we will use the classic [demographic parity](https://fairmlbook.org/classification.html) metric, since it is simple and closely connected to the legal notion of disparate impact. The same analysis can also be applied to other metrics such as [decision theory cost](https://arxiv.org/abs/1808.00023), [equalized odds](https://arxiv.org/pdf/1803.02453.pdf), [equal opportunity](https://ttic.uchicago.edu/~nati/Publications/HardtPriceSrebro2016.pdf), or [equal quality of service](https://github.com/fairlearn/fairlearn/blob/master/TERMINOLOGY.md). Demographic parity states that the output of the machine learning model should be equal between two or more groups.
The demographic parity difference is then a measure of how much disparity there is between model outcomes in two groups of samples.**Since SHAP decomposes the model output into feature attributions with the same units as the original model output, we can first decompose the model output among each of the input features using SHAP, and then compute the demographic parity difference (or any other fairness metric) for each input feature separately using the SHAP value for that feature.** Because the SHAP values sum up to the model's output, the sum of the demographic parity differences of the SHAP values also sums up to the demographic parity difference of the whole model. What SHAP fairness explanations look like in various simulated scenariosTo help us explore the potential usefulness of explaining quantitative fairness metrics we consider a simple simulated scenario based on credit underwriting. In our simulation there are four underlying factors that drive the risk of default for a loan: income stability, income amount, spending restraint, and consistency. These underlying factors are not observed, but they variously influence four different observable features: job history, reported income, credit inquiries, and late payments. Using this simulation we generate random samples and then train a non-linear [XGBoost](https://xgboost.ai/) classifier to predict the probability of default. The same process also works for any other model type supported by SHAP; just remember that explanations of more complicated models hide more of the model's details.By introducing sex-specific reporting errors into a fully specified simulation we can observe how the biases caused by these errors are captured by our chosen fairness metric. In our simulated case the true labels (will default on a loan) are statistically independent of sex (the sensitive class we use to check for fairness), so any disparity between men and women means one or both groups are being modeled incorrectly due to feature measurement errors, labeling errors, or model errors. If the true labels you are predicting (which might be different than the training labels you have access to) are not statistically independent of the sensitive feature you are considering, then even a perfect model with no errors would fail demographic parity. In these cases fairness explanations can help you determine which sources of demographic disparity are valid.
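To make the metric concrete, here is a minimal sketch of the demographic parity difference as we compute it below; the helper name is ours for illustration and is not part of SHAP's API:
###Code
import numpy as np

def demographic_parity_difference(outputs, group_mask):
    # average model output for the group flagged True (here, women)
    # minus the average for the group flagged False (men)
    outputs = np.asarray(outputs)
    group_mask = np.asarray(group_mask, dtype=bool)
    return outputs[group_mask].mean() - outputs[~group_mask].mean()
###Output
_____no_output_____
###Markdown
With that in hand, we set up the simulation used in all of the scenarios below.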
###Code
# here we define a function that we can call to execute our simulation under
# a variety of different alternative scenarios
import scipy as sp
import numpy as np
import matplotlib.pyplot as pl
import pandas as pd
import shap
%config InlineBackend.figure_format = 'retina'
def run_credit_experiment(N, job_history_sex_impact=0, reported_income_sex_impact=0, income_sex_impact=0,
late_payments_sex_impact=0, default_rate_sex_impact=0,
include_brandx_purchase_score=False, include_sex=False):
np.random.seed(0)
sex = np.random.randint(0, 2, N) == 1 # randomly half men and half women
# four hypothetical causal factors influence customer quality
# they are all scaled to the same units between 0-1
income_stability = np.random.rand(N)
income_amount = np.random.rand(N)
if income_sex_impact > 0:
income_amount -= income_sex_impact/90000 * sex * np.random.rand(N)
income_amount -= income_amount.min()
income_amount /= income_amount.max()
spending_restraint = np.random.rand(N)
consistency = np.random.rand(N)
# intuitively this product says that high customer quality comes from simultaneously
# being strong in all factors
customer_quality = income_stability * income_amount * spending_restraint * consistency
# job history is a random function of the underlying income stability feature
job_history = np.maximum(
10 * income_stability + 2 * np.random.rand(N) - job_history_sex_impact * sex * np.random.rand(N)
, 0)
# reported income is a random function of the underlying income amount feature
reported_income = np.maximum(
10000 + 90000*income_amount + np.random.randn(N) * 10000 - \
reported_income_sex_impact * sex * np.random.rand(N)
, 0)
# credit inquiries is a random function of the underlying spending restraint and income amount features
credit_inquiries = np.round(6 * np.maximum(-spending_restraint + income_amount, 0)) + \
np.round(np.random.rand(N) > 0.1)
    # late payments is a random function of the underlying consistency and income stability features
late_payments = np.maximum(
np.round(3 * np.maximum((1-consistency) + 0.2 * (1-income_stability), 0)) + \
np.round(np.random.rand(N) > 0.1) - np.round(late_payments_sex_impact * sex * np.random.rand(N))
, 0)
# bundle everything into a data frame and define the labels based on the default rate and customer quality
X = pd.DataFrame({
"Job history": job_history,
"Reported income": reported_income,
"Credit inquiries": credit_inquiries,
"Late payments": late_payments
})
default_rate = 0.40 + sex * default_rate_sex_impact
y = customer_quality < np.percentile(customer_quality, default_rate * 100)
if include_brandx_purchase_score:
brandx_purchase_score = sex + 0.8 * np.random.randn(N)
X["Brand X purchase score"] = brandx_purchase_score
if include_sex:
X["Sex"] = sex + 0
# build model
import xgboost
model = xgboost.XGBClassifier(max_depth=1, n_estimators=500, subsample=0.5, learning_rate=0.05)
model.fit(X, y)
# build explanation
import shap
explainer = shap.TreeExplainer(model, shap.sample(X, 100))
shap_values = explainer.shap_values(X)
return shap_values, sex, X, explainer.expected_value
###Output
_____no_output_____
###Markdown
Scenario A: No reporting errorsOur first experiment is a simple baseline check where we refrain from introducing any sex-specific reporting errors. While we could use any model output to measure demographic parity, we use the continuous log-odds score from a binary XGBoost classifier. As expected, this baseline experiment results in no significant demographic parity difference between the credit scores of men and women. We can see this by plotting the difference between the average credit score for women and men as a bar plot and noting that the difference falls within the margin of error of zero (negative values mean women have a lower average predicted risk than men, and positive values mean women have a higher average predicted risk than men):
###Code
N = 10000
shap_values_A, sex_A, X_A, ev_A = run_credit_experiment(N)
model_outputs_A = ev_A + shap_values_A.sum(1)
glabel = "Demographic parity difference\nof model output for women vs. men"
xmin = -0.8
xmax = 0.8
shap.group_difference_plot(shap_values_A.sum(1), sex_A, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
_____no_output_____
###Markdown
Now we can use SHAP to decompose the model output among each of the model's input features and then compute the demographic parity difference on the component attributed to each feature. As noted above, because the SHAP values sum up to the model's output, the sum of the demographic parity differences of the SHAP values for each feature sums up to the demographic parity difference of the whole model. This means that the sum of the bars below equals the bar above (the demographic parity difference of our baseline scenario model).
###Code
slabel = "Demographic parity difference\nof SHAP values for women vs. men"
shap.group_difference_plot(shap_values_A, sex_A, X_A.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
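###Markdown
As a quick sanity check, this additivity can be verified directly from the arrays computed above (a short sketch; nothing here is specific to scenario A):
###Code
# per-feature demographic parity differences (women minus men)...
per_feature_dpd = shap_values_A[sex_A].mean(0) - shap_values_A[~sex_A].mean(0)
# ...sum to the demographic parity difference of the whole model output
overall_dpd = model_outputs_A[sex_A].mean() - model_outputs_A[~sex_A].mean()
assert np.isclose(per_feature_dpd.sum(), overall_dpd)
###Output
_____no_output_____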
###Markdown
Scenario B: An under-reporting bias for women's incomeIn our baseline scenario we designed a simulation where sex had no impact on any of the features or labels used by the model. Here in scenario B we introduce an under-reporting bias for women's income into the simulation. The point here is not how realistic it would be for women's income to be under-reported in the real world, but rather how we can identify that a sex-specific bias has been introduced and understand where it came from. By plotting the difference in average model output (default risk) between women and men we can see that the income under-reporting bias has created a significant demographic parity difference where women now have a higher risk of default than men:
###Code
shap_values_B, sex_B, X_B, ev_B = run_credit_experiment(N, reported_income_sex_impact=30000)
model_outputs_B = ev_B + shap_values_B.sum(1)
shap.group_difference_plot(shap_values_B.sum(1), sex_B, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
95%|=================== | 9542/10000 [00:11<00:00]
###Markdown
If this were a real application, this demographic parity difference might trigger an in-depth analysis of the model to determine what might be causing the disparity. While this investigation is challenging given just a single demographic parity difference value, it is much easier given the per-feature demographic parity decomposition based on SHAP. Using SHAP we can see there is a significant bias coming from the reported income feature that is increasing the risk of women disproportionately to men. This allows us to quickly identify which feature has the reporting bias that is causing our model to violate demographic parity:
###Code
shap.group_difference_plot(shap_values_B, sex_B, X_B.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
It is important to note at this point how our assumptions can impact the interpretation of SHAP fairness explanations. In our simulated scenario we know that women actually have identical income profiles to men, so when we see that the reported income feature is biased lower for women than for men, we know that it has come from a bias in the measurement errors in the reported income feature. The best way to address this problem would be to figure out how to debias the measurement errors in the reported income feature. Doing so would create a more accurate model that also has less demographic disparity. However, if we instead assume that women actually are making less money than men (and it is not just a reporting error), then we can't just "fix" the reported income feature. Instead we have to carefully consider how best to account for real differences in default risk between two protected groups. It is impossible to determine which of these two situations is happening using just the SHAP fairness explanation, since in both cases the reported income feature will be responsible for an observed disparity between the predicted risks of men and women. Scenario C: An under-reporting bias for women's late paymentsTo verify that SHAP demographic parity explanations can correctly detect disparities regardless of the direction of effect or source feature, we repeat our previous experiment but instead of an under-reporting bias for income, we introduce an under-reporting bias for women's late payment rates. This results in a significant demographic parity difference for the model's output where now women have a lower average default risk than men:
###Code
shap_values_C, sex_C, X_C, ev_C = run_credit_experiment(N, late_payments_sex_impact=2)
model_outputs_C = ev_C + shap_values_C.sum(1)
shap.group_difference_plot(shap_values_C.sum(1), sex_C, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
_____no_output_____
###Markdown
And as we would hope, the SHAP explanations correctly highlight the late payments feature as the cause of the model's demographic parity difference, as well as the direction of the effect:
###Code
shap.group_difference_plot(shap_values_C, sex_C, X_C.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Scenario D: An under-reporting bias for women's default ratesThe experiments above focused on introducing reporting errors for specific input features. Next we consider what happens when we introduce reporting errors on the training labels through an under-reporting bias on women's default rates (which means defaults are less likely to be reported for women than men). Interestingly, for our simulated scenario this results in no significant demographic parity differences in the model's output:
###Code
shap_values_D, sex_D, X_D, ev_D = run_credit_experiment(N, default_rate_sex_impact=-0.1) # women's default rate drops from 40% to 30%
model_outputs_D = ev_D + shap_values_D.sum(1)
shap.group_difference_plot(shap_values_D.sum(1), sex_D, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
_____no_output_____
###Markdown
We also see no evidence of any demographic parity differences in the SHAP explanations:
###Code
shap.group_difference_plot(shap_values_D, sex_D, X_D.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Scenario E: An under-reporting bias for women's default rates, take 2It may at first be surprising that no demographic parity differences were caused when we introduced an under-reporting bias on women's default rates. This is because none of the four features in our simulation are significantly correlated with sex, so none of them could be effectively used to model the bias we introduced into the training labels. If we now instead provide a new feature (brand X purchase score) to the model that is correlated with sex, then we see a demographic parity difference emerge as that feature is used by the model to capture the sex-specific bias in the training labels:
###Code
shap_values_E, sex_E, X_E, ev_E = run_credit_experiment(
N, default_rate_sex_impact=-0.1, include_brandx_purchase_score=True
)
model_outputs_E = ev_E + shap_values_E.sum(1)
shap.group_difference_plot(shap_values_E.sum(1), sex_E, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
98%|===================| 9794/10000 [00:11<00:00]
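###Markdown
We can check this premise directly: of the five features in scenario E, only the brand X purchase score is meaningfully correlated with sex (a quick sketch over the simulated data; the values are point-biserial correlations since sex is binary):
###Code
# correlation of each feature with sex; the four base features are near zero
# while the brand X purchase score is strongly correlated by construction
for col in X_E.columns:
    print(col, round(np.corrcoef(X_E[col], sex_E + 0)[0, 1], 3))
###Output
_____no_output_____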
###Markdown
When we explain the demographic parity difference with SHAP we see that, as expected, the brand X purchase score feature drives the difference. In this case it is not because we have a bias in how we measure the brand X purchase score feature, but rather because we have a bias in our training label that gets captured by any input features that are sufficiently correlated with sex:
###Code
shap.group_difference_plot(shap_values_E, sex_E, X_E.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Scenario F: Teasing apart multiple under-reporting biasesWhen there is a single cause of reporting bias then both the classic demographic parity test on the model's output, and the SHAP explanation of the demographic parity test capture the same bias effect (though the SHAP explanation can often have more statistical significance since it isolates the feature causing the bias). But what happens when there are multiple causes of bias occurring in a dataset? In this experiment we introduce two such biases, an under-reporting of women's default rates, and an under-reporting of women's job history. These biases tend to offset each other in the global average and so a demographic parity test on the model's output shows no measurable disparity:
###Code
shap_values_F, sex_F, X_F, ev_F = run_credit_experiment(
N, default_rate_sex_impact=-0.1, include_brandx_purchase_score=True,
job_history_sex_impact=2
)
model_outputs_F = ev_F + shap_values_F.sum(1)
shap.group_difference_plot(shap_values_F.sum(1), sex_F, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
100%|===================| 9996/10000 [00:11<00:00]
###Markdown
However, if we look at the SHAP explanation of the demographic parity difference we clearly see both (counteracting) biases:
###Code
shap.group_difference_plot(shap_values_F, sex_F, X_F.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Identifying multiple potentially offsetting bias effects can be important since, while on average there is no disparate impact on men or women, there is disparate impact on individuals. For example, in this simulation women who have not shopped at brand X will receive a lower credit score than they should have because of the bias present in job history reporting. How introducing a protected feature can help distinguish between label bias and feature biasIn scenario F we were able to pick apart two distinct forms of bias, one coming from job history under-reporting and one coming from default rate under-reporting. However, the bias from default rate under-reporting was not attributed to the default rate label, but rather to the brand X purchase score feature that happened to be correlated with sex. This still leaves us with some uncertainty about the true sources of demographic parity differences, since any difference attributed to an input feature could be due to an issue with that feature, or due to an issue with the training labels.It turns out that in this case we can help disentangle label bias from feature bias by introducing sex as a variable directly into the model. The goal of introducing sex as an input feature is to cause the label bias to fall entirely on the sex feature, leaving the feature biases untouched. So we can then distinguish between label biases and feature biases by comparing the results of scenario F above to our new scenario G below. This of course creates an even stronger demographic parity difference than we had before, but that is fine since our goal here is not bias mitigation, but rather bias understanding.
###Code
shap_values_G, sex_G, X_G, ev_G = run_credit_experiment(
N, default_rate_sex_impact=-0.1, include_brandx_purchase_score=True,
job_history_sex_impact=2, include_sex=True
)
model_outputs_G = ev_G + shap_values_G.sum(1)
shap.group_difference_plot(shap_values_G.sum(1), sex_G, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
97%|=================== | 9720/10000 [00:11<00:00]
###Markdown
The SHAP explanation for scenario G shows that all of the demographic parity difference that used to be attached to the brand X purchase score feature in scenario F has now moved to the sex feature, while none of the demographic parity difference attached to the job history feature in scenario F has moved. This can be interpreted to mean that all of the disparity attributed to brand X purchase score in scenario F was due to label bias, while all of the disparity attributed to job history in scenario F was due to feature bias.
###Code
shap.group_difference_plot(shap_values_G, sex_G, X_G.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Explaining Measures of Fairness with SHAPThis hands-on article connects explainable AI methods with fairness measures and shows how modern explainability methods can enhance the usefulness of quantitative fairness metrics. By using [SHAP](http://github.com/slundberg/shap) (a popular explainable AI tool) we can decompose measures of fairness and allocate responsibility for any observed disparity among each of the model's input features. Explaining these quantitative fairness metrics can reduce the concerning tendency to rely on them as opaque standards of fairness, and instead promote their informed use as tools for understanding how model behavior differs between groups.Quantitative fairness metrics seek to bring mathematical precision to the definition of fairness in machine learning [[1](https://books.google.com/books/about/The_Ethical_Algorithm.html?id=QmmtDwAAQBAJ&source=kp_book_description)]. Definitions of fairness however are deeply rooted in human ethical principles, and so on value judgements that often depend critically on the context in which a machine learning model is being used. This practical dependence on value judgements manifests itself in the mathematics of quantitative fairness measures as a set of trade-offs between sometimes mutually incompatible definitions of fairness [[2](https://arxiv.org/abs/1609.05807)]. Since fairness relies on context-dependent value judgements it is dangerous to treat quantitative fairness metrics as opaque black-box measures of fairness [[3](https://arxiv.org/abs/1808.00023)], since doing so may obscure important value judgment choices.<!--This article covers:1. How SHAP can be used to explain various measures of model fairness.2. What SHAP fairness explanations look like in various simulated scenarios.3. How introducing a protected feature can help distiguish between label bias vs. feature bias. 4. Things you can't learn from a SHAP fairness explanation.--> How SHAP can be used to explain various measures of model fairnessThis article is not about how to choose the correct measure of model fairness, but rather about explaining whatever metrics you have chosen. Which fairness metric is most appropriate depends the specifics of your context, such as what laws apply, how the outputs of the machine learning model impact people, and what value you place on various outcomes and hence tradeoffs. In many real-world situations it may be best to define your own fairness metric based on the values/costs of various potential outcomes that depend on the model's output. For this analysis we use the classic [demographic parity](https://arxiv.org/pdf/1803.02453.pdf) metric, though the same analysis works with other metrics such as [equalized odds](https://arxiv.org/pdf/1803.02453.pdf), [equal opportunity](https://ttic.uchicago.edu/~nati/Publications/HardtPriceSrebro2016.pdf), or [equal quality of service](https://github.com/fairlearn/fairlearn/blob/master/TERMINOLOGY.md). Demographic parity states that the output of the machine learning model should be equal between two or more groups. The demographic parity difference is then a measure of how much disparity there is between model outcomes in two groups of samples. We use demographic parity for this scenario not because it is the right measure of fairness in most situations (it is often not), but because it is straightforward and commonly known. 
In our simulated case demographic parity is appropriate because in our simulation the true labels (will default on a loan) are statistically independent of sex (the sensitive class we use to check for fairness). So any disparity between men and women means one or both groups are being modeled incorrectly due to feature errors, labeling errors, or model errors.**Since SHAP decomposes the model output into feature attributions with the same units as the original model output, we can first decompose the model output among each of the input features using SHAP, and then compute the demographic parity difference (or any other fairness metric) for each input feature seperately using the SHAP value for that feature.** Because the SHAP values sum up to the model's output, the sum of the demographic parity differences of the SHAP values also sum up to the demographic parity difference of the whole model.<!--To will not explainThe danger of treating quantitative fairness metrics as opaque, black-box measures of fairness is strikingly similar to a related problem of treating machine learning models themselves as opaque, black-box predictors. While using a black-box is reasonable in many cases, important problems and assumptions can often be hidden (and hence ignored) when users don't understand the reasons behind a model's behavior \cite{ribeiro2016should}. In response to this problem many explainable AI methods have been developed to help users understand the behavior of modern complex models \cite{vstrumbelj2014explaining,ribeiro2016should,lundberg2017unified}. Here we explore how to apply explainable AI methods to quantitative fairness metrics.--> What SHAP fairness explanations look like in various simulated scenariosTo help us explore the potential usefulness of explaining quantitative fairness metrics we consider a simple simulated scenario based on credit underwriting. In our simulation there are four underlying factors that drive the risk of default for a loan: income stability, income amount, spending restraint, and consistency. These underlying factors are not observed, but they variously influence four different observable features: job history, reported income, credit inquiries, and late payments. Using this simulation we generate random samples and then train a non-linear [XGBoost](https://xgboost.ai/) classifier to predict the probability of default. The same process also works for any other model type supported by SHAP, just remember that explanations of more complicated models hide more of the model's details.By introducing sex-specific reporting errors into a fully specified simulation we can observe how the biases caused by these errors are captured by fairness metrics. In our simulated case we can appropriatly use the demographic parity metric because in our simulation the true labels (will default on a loan) are statistically independent of sex (the sensitive class we use to check for fairness). If the true labels you are predicting (which might be different than the training labels you have access to) are not (marginally) statistically independent of the sensitive feature you are considering, then even a perfect model with no errors would fail demographic parity, and so you will likely want to consider other measures of fairness.<!--This article explores how we can use modern explainable AI tools to enhance traditional quantitative measures of model fairness. It is practical and hands-on, so feel free to follow along in the associated [notebook]. 
I assume you have a basic understanding of how people measure fairness for machine learning models. If you have never before considered fairness in the context of machine learning, then I recommend starting with a basic introduction such as XXX. I am not writing this Here I do not beforeIt is not meant to be a definitite One futher disclaimer is that as the author of SHAP (a popular explainable AI tool) I am very familar with the strengths and weaknesses of explainable AI tools, but I do not consider myself a fairness expert. So consider this a thought-provoking guide on how explainable AI tools can enhance quantitative measures of model fairnessI consider myself fairly well informed about explainable AI, but I Questions about fairness and equal treatment naturally arise whenever the outputs of a machine learning model impact people. For sensitive use-cases such as credit underwriting or crime prediction there are even laws that govern certain aspects of fairness. While fairness issues are not new, the rising popularily of machine learning model Legal fairness protections are even legally encorced for sensitive use-cases such as credit underwriting or crime prediction, but is also important in many other situations such as quality of service, or you might not initially to consider whenever you are using m Quantifying the fairness of a machine learning model has recently received considerable attention in the research community, and many quantitative fairness metrics have been proposed. In parallel to this work on fairness, explaining the outputs of a machine learning model has also received considerable research attention. %Explainability is intricately connected to fairness, since good explanations enable users to understand a model's behavior and so judge its fairness.Here we connect explainability methods with fairness measures and show how recent explainability methods can enhance the usefulness of quantitative fairness metrics by decomposing them among the model's input features. Explaining quantitative fairness metrics can reduce our tendency to rely on them as opaque standards of fairness, and instead promote their informed use as tools for understanding model behavior between groups. This notebook explores how SHAP can be used to explain quantitative measures of fairness, and so enhance their usefulness. To do this we consider a simple simulated scenario based on credit underwriting. In the simulation below there are four underlying factors that drive the risk of default for a loan: income stability, income amount, spending restraint, and consistency. These underlying factors are not observed, but they influence four different observable features in various ways: job history, reported income, credit inquiries, and late payments. Using this simulation we generate random samples and then train a non-linear gradient boosting tree classifier to predict the probability of default.By introducing sex-specific reporting errors into the simulation we can observe how the biases caused by these errors are captured by fairness metrics. For this analysis we use the classic statistical parity metric, though the same analysis works with other metrics. Note that for a more detailed description of fairness metrics you can check out the [fairlearn package's documentation](https://github.com/fairlearn/fairlearn/blob/master/TERMINOLOGY.mdfairness-of-ai-systems).-->
###Code
# here we define a function that we can call to execute our simulation under
# a variety of different alternative scenarios
import scipy as sp
import numpy as np
import matplotlib.pyplot as pl
import pandas as pd
import shap
%config InlineBackend.figure_format = 'retina'
def run_credit_experiment(N, job_history_sex_impact=0, reported_income_sex_impact=0, income_sex_impact=0,
late_payments_sex_impact=0, default_rate_sex_impact=0,
include_brandx_purchase_score=False, include_sex=False):
np.random.seed(0)
sex = np.random.randint(0, 2, N) == 1 # randomly half men and half women
# four hypothetical causal factors influence customer quality
# they are all scaled to the same units between 0-1
income_stability = np.random.rand(N)
income_amount = np.random.rand(N)
if income_sex_impact > 0:
income_amount -= income_sex_impact/90000 * sex * np.random.rand(N)
income_amount -= income_amount.min()
income_amount /= income_amount.max()
spending_restraint = np.random.rand(N)
consistency = np.random.rand(N)
# intuitively this product says that high customer quality comes from simultaneously
# being strong in all factors
customer_quality = income_stability * income_amount * spending_restraint * consistency
# job history is a random function of the underlying income stability feature
job_history = np.maximum(
10 * income_stability + 2 * np.random.rand(N) - job_history_sex_impact * sex * np.random.rand(N)
, 0)
# reported income is a random function of the underlying income amount feature
reported_income = np.maximum(
10000 + 90000*income_amount + np.random.randn(N) * 10000 - \
reported_income_sex_impact * sex * np.random.rand(N)
, 0)
# credit inquiries is a random function of the underlying spending restraint and income amount features
credit_inquiries = np.round(6 * np.maximum(-spending_restraint + income_amount, 0)) + \
np.round(np.random.rand(N) > 0.1)
# credit inquiries is a random function of the underlying consistency and income stability features
late_payments = np.maximum(
np.round(3 * np.maximum((1-consistency) + 0.2 * (1-income_stability), 0)) + \
np.round(np.random.rand(N) > 0.1) - np.round(late_payments_sex_impact * sex * np.random.rand(N))
, 0)
# bundle everything into a data frame and define the labels based on the default rate and customer quality
X = pd.DataFrame({
"Job history": job_history,
"Reported income": reported_income,
"Credit inquiries": credit_inquiries,
"Late payments": late_payments
})
default_rate = 0.40 + sex * default_rate_sex_impact
y = customer_quality < np.percentile(customer_quality, default_rate * 100)
if include_brandx_purchase_score:
brandx_purchase_score = sex + 0.8 * np.random.randn(N)
X["Brand X purchase score"] = brandx_purchase_score
if include_sex:
X["Sex"] = sex + 0
# build model
import xgboost
model = xgboost.XGBClassifier(max_depth=1, n_estimators=500, subsample=0.5, learning_rate=0.05)
model.fit(X, y)
# build explanation
import shap
explainer = shap.TreeExplainer(model, shap.sample(X, 100))
shap_values = explainer.shap_values(X)
return shap_values, sex, X, explainer.expected_value
###Output
_____no_output_____
###Markdown
<!-- Scenario A: No reporting errorsAs a baseline experiment we refrain from introducing any sex-specific reporting errors. This results in no significant statistical parity difference between the credit score of men and women:--> Scenario A: No reporting errorsOur first experiment is a simple baseline check where we refrain from introducing any sex-specific reporting errors. While we could use any model output to measure demographic parity, we use the continuous log-odds score from a binary XGBoost classifier. As expected, this baseline experiment results in no significant demographic parity difference between the credit scores of men and women. We can see this by plotting the difference between the average credit score for women and men as a bar plot and noting that zero is close to the margin of error (note that negative values mean women have a lower average predicted risk than men, and positive values mean that women have a higher average predicted risk than men):
###Code
N = 10000
shap_values_A, sex_A, X_A, ev_A = run_credit_experiment(N)
model_outputs_A = ev_A + shap_values_A.sum(1)
glabel = "Demographic parity difference\nof model output for women vs. men"
xmin = -0.8
xmax = 0.8
shap.group_difference_plot(shap_values_A.sum(1), sex_A, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
_____no_output_____
###Markdown
Now we can use SHAP to decompose the model output among each of the model's input features and then compute the demographic parity difference on the component attributed to each feature. As noted above, because the SHAP values sum up to the model's output, the sum of the demographic parity differences of the SHAP values for each feature sum up to the demographic parity difference of the whole model. This means that the sum of the bars below equals the bar above (the demographic parity difference of our baseline scenario model).
###Code
slabel = "Demographic parity difference\nof SHAP values for women vs. men"
shap.group_difference_plot(shap_values_A, sex_A, X_A.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Scenario B: An under-reporting bias for women's incomeIn our baseline scenario we designed a simulation where sex had no impact on any of the features or labels used by the model. Here in scenario B we introduce an under-reporting bias for women's income into the simulation. The point here is not how realistic it would be for women's income to be under-reported in the real-world, but rather how we can identify that a sex-specific bias has been introduced and understand where it came from. By plotting the difference in average model output (default risk) between women and men we can see that the income under-reporting bias has created a significant demographic parity difference where women now have a higher risk of default than men:
###Code
shap_values_B, sex_B, X_B, ev_B = run_credit_experiment(N, reported_income_sex_impact=30000)
model_outputs_B = ev_B + shap_values_B.sum(1)
shap.group_difference_plot(shap_values_B.sum(1), sex_B, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
95%|=================== | 9542/10000 [00:11<00:00]
###Markdown
If this were a real application, this demographic parity difference might trigger an in-depth analysis of the model to determine what might be causing the disparity. While this investigation is challenging given just a single demographic parity difference value, it is much easier given the per-feature statistical parity decomposition based on SHAP. Using SHAP we can see there is a significant bias coming from the reported income feature that is increasing the risk of women disproportionately to men. This allows us to quickly identify which feature has the reporting bias that is causing our model to violate demographic parity:
###Code
shap.group_difference_plot(shap_values_B, sex_B, X_B.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
It is important to note at this point how our assumptions can impact the interpretation of SHAP fairness explanations. In our simulated scenario we know that women actually have identical income profiles to men, so when we see that the reported income feature is biased lower for women than for men, we know that has come from a bias in the measurement errors in the reported income feature. The best way to address this problem would be figure out how to debias the measurement errors in the reported income feature. Doing so would create a more accurate model that also has less demographic disparity. However, if we instead assume that women actually are making less money than men (and it is not just a reporting error), then we can't just "fix" the reported income feature. Instead we have to carefully consider how best to account for real differences in default risk between two protected groups. It is impossible to determine which of these two situations is happening using just the SHAP fairness explanation, since in both cases the reported income feature will be responsible for an observed disparity between the predicted risks of men and women. Scenario C: An under-reporting bias for women's late paymentsTo verify that SHAP demographic parity explanations can correctly detect disparities regardless of the direction of effect or source feature, we repeat our previous experiment but instead of an under-reporting bias for income, we introduce an under-reporting bias for women's late payment rates. This results in a significant demographic parity difference for the model's output where now women have a lower average default risk than men:
###Code
shap_values_C, sex_C, X_C, ev_C = run_credit_experiment(N, late_payments_sex_impact=2)
model_outputs_C = ev_C + shap_values_C.sum(1)
shap.group_difference_plot(shap_values_C.sum(1), sex_C, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
_____no_output_____
###Markdown
And as we would hope, the SHAP explanations correctly highlight the late payments feature as the cause of the model's demographic parity difference, as well as the direction of the effect:
###Code
shap.group_difference_plot(shap_values_C, sex_C, X_C.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Scenario D: An under-reporting bias for women's default ratesThe experiments above focused on introducing reporting errors for specific input features. Next we consider what happens when we introduce reporting errors on the training labels through an under-reporting bias on women's default rates (which means defaults are less likely to be reported for women than men). Interestingly, for our simulated scenario this results in no significant demographic parity differences in the model's output:
###Code
shap_values_D, sex_D, X_D, ev_D = run_credit_experiment(N, default_rate_sex_impact=-0.1) # 20% change
model_outputs_D = ev_D + shap_values_D.sum(1)
shap.group_difference_plot(shap_values_D.sum(1), sex_D, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
_____no_output_____
###Markdown
We also see no evidence of any demographic parity differences in the SHAP explanations:
###Code
shap.group_difference_plot(shap_values_D, sex_D, X_D.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Scenario E: An under-reporting bias for women's default rates, take 2It may at first be surprising that no statistical parity differences were caused when we introduced an under-reporting bias on women's default rates. This is because none of the four features in our simulation are significantly correlated with sex, so none of them could be effectively used to model the bias we introduced into the training labels. If we now instead provide a new feature (brand X purchase score) to the model that is correlated with sex, then we see a statistical parity difference emerge as that feature is used by the model to capture the sex-specific bias in the training labels:
###Code
shap_values_E, sex_E, X_E, ev_E = run_credit_experiment(
N, default_rate_sex_impact=-0.1, include_brandx_purchase_score=True
)
model_outputs_E = ev_E + shap_values_E.sum(1)
shap.group_difference_plot(shap_values_E.sum(1), sex_E, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
98%|===================| 9794/10000 [00:11<00:00]
###Markdown
When we explain the statistical parity difference with SHAP we see that, as expected, the brand X purchase score feature drives the difference. In this case it is not because we have a bias in how we measure the brand X purchase score feature, but rather because we have a bias in our training label that gets captured by any input features that are sufficiently correlated with sex:
###Code
shap.group_difference_plot(shap_values_E, sex_E, X_E.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Scenario F: Teasing apart multiple under-reporting biasesWhen there is a single cause of reporting bias then both the classic demographic parity test on the model's output, and the SHAP explanation of the demographic parity test capture the same bias effect (though the SHAP explanation can often have more statistical significance since it isolates the feature causing the bias). But what happens when there are multiple causes of bias occurring in a dataset? In this experiment we introduce two such biases, an under-reporting of women's default rates, and an under-reporting of women's job history. These biases tend to offset each other in the global average and so a statistical parity test on the model's output shows no measurable disparity:
###Code
shap_values_F, sex_F, X_F, ev_F = run_credit_experiment(
N, default_rate_sex_impact=-0.1, include_brandx_purchase_score=True,
job_history_sex_impact=2
)
model_outputs_F = ev_F + shap_values_F.sum(1)
shap.group_difference_plot(shap_values_F.sum(1), sex_F, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
100%|===================| 9996/10000 [00:11<00:00]
###Markdown
However, if we look at the SHAP explanation of the statistical parity difference we clearly see both (counteracting) biases:
###Code
shap.group_difference_plot(shap_values_F, sex_F, X_F.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Identifying multiple potentially offsetting bias effects can be important since while on average there is no disparate impact on men or women, there is disparate impact on individuals. For example, in this simulation women who have not shopped at brand X will receive a lower credit score than they should have because of the bias present in job history reporting. How introducing a protected feature can help distinguish between label bias and feature biasIn scenario F we were able to pick apart two distict forms of bias, one coming from job history under-reporting and one coming from default rate under-reporting. However, the bias from default rate under-reporting was not attributed to the default rate label, but rather to the brand X purchase score feature that happened to be correlated with sex. This still leaves us with some uncertainty about the true sources of statistical parity differences, since any difference attributed to an input feature could be due to an issue with that feature, or due to an issue with the training labels.It turns out that in this case we can help disentangle label bias from feature bias by introducing sex as a variable directly into the model. The goal of introducing sex as an input feature is to cause the label bias to fall entirely on the sex feature, leaving the feature biases untouched. So we can then distinguish between label biases and feature biases by comparing the results of scenario F above to our new scenario G below. This of course creates an even stronger demographic parity difference than we had before, but that is fine since our goal here is not bias mitigation, but rather bias understanding.
###Code
shap_values_G, sex_G, X_G, ev_G = run_credit_experiment(
N, default_rate_sex_impact=-0.1, include_brandx_purchase_score=True,
job_history_sex_impact=2, include_sex=True
)
model_outputs_G = ev_G + shap_values_G.sum(1)
shap.group_difference_plot(shap_values_G.sum(1), sex_G, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
97%|=================== | 9720/10000 [00:11<00:00]
###Markdown
The SHAP explanation for scenario G shows that all of the demographic parity difference that used to be attached to the brand X purchase score feature in scenario F has now moved to the sex feature, while none of the demographic parity difference attached to the job history feature in scenario F has moved. This can be interpreted to mean that all of the disparity attributed to brand X purchase score in scenario F was due to label bias, while all of the disparity attributed to job history in scenario F was due to feature bias.
###Code
shap.group_difference_plot(shap_values_G, sex_G, X_G.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Explaining Measures of Fairness with SHAPThis hands-on article connects explainable AI methods with fairness measures and shows how modern explainability methods can enhance the usefulness of quantitative fairness metrics. By using [SHAP](http://github.com/slundberg/shap) (a popular explainable AI tool) we can decompose measures of fairness and allocate responsibility for any observed disparity among each of the model's input features. Explaining these quantitative fairness metrics can reduce the concerning tendency to rely on them as opaque standards of fairness, and instead promote their informed use as tools for understanding how model behavior differs between groups.Quantitative fairness metrics seek to bring mathematical precision to the definition of fairness in machine learning [[1](https://books.google.com/books/about/The_Ethical_Algorithm.html?id=QmmtDwAAQBAJ&source=kp_book_description)]. Definitions of fairness however are deeply rooted in human ethical principles, and so on value judgements that often depend critically on the context in which a machine learning model is being used. This practical dependence on value judgements manifests itself in the mathematics of quantitative fairness measures as a set of trade-offs between sometimes mutually incompatible definitions of fairness [[2](https://arxiv.org/abs/1609.05807)]. Since fairness relies on context-dependent value judgements it is dangerous to treat quantitative fairness metrics as opaque black-box measures of fairness [[3](https://arxiv.org/abs/1808.00023)], since doing so may obscure important value judgment choices.<!--This article covers:1. How SHAP can be used to explain various measures of model fairness.2. What SHAP fairness explanations look like in various simulated scenarios.3. How introducing a protected feature can help distiguish between label bias vs. feature bias. 4. Things you can't learn from a SHAP fairness explanation.--> How SHAP can be used to explain various measures of model fairnessThis article is not about how to choose the correct measure of model fairness, but rather about explaining whatever metrics you have chosen. Which fairness metric is most appropriate depends the specifics of your context, such as what laws apply, how the outputs of the machine learning model impact people, and what value you place on various outcomes and hence tradeoffs. For this analysis we use the classic [demographic parity](https://arxiv.org/pdf/1803.02453.pdf) metric, though the same analysis works with other metrics such as [equalized odds](https://arxiv.org/pdf/1803.02453.pdf), [equal opportunity](https://ttic.uchicago.edu/~nati/Publications/HardtPriceSrebro2016.pdf), or [equal quality of service](https://github.com/fairlearn/fairlearn/blob/master/TERMINOLOGY.md). Demographic parity states that the output of the machine learning model should be equal between two or more groups. The demographic parity difference is then a measure of how much disparity there is between model outcomes in two groups of samples. We use demographic parity for this scenario not because it is the right measure of fairness in most situations (it is often not), but because it is straightforward and commonly known. 
**Since SHAP decomposes the model output into feature attributions with the same units as the original model output, we can first decompose the model output among each of the input features using SHAP, and then compute the demographic parity difference (or any other fairness metric) for each input feature separately using the SHAP value for that feature.** Because the SHAP values sum up to the model's output, the sum of the demographic parity differences of the SHAP values also sums up to the demographic parity difference of the whole model.<!--To will not explainThe danger of treating quantitative fairness metrics as opaque, black-box measures of fairness is strikingly similar to a related problem of treating machine learning models themselves as opaque, black-box predictors. While using a black-box is reasonable in many cases, important problems and assumptions can often be hidden (and hence ignored) when users don't understand the reasons behind a model's behavior \cite{ribeiro2016should}. In response to this problem many explainable AI methods have been developed to help users understand the behavior of modern complex models \cite{vstrumbelj2014explaining,ribeiro2016should,lundberg2017unified}. Here we explore how to apply explainable AI methods to quantitative fairness metrics.--> What SHAP fairness explanations look like in various simulated scenariosTo help us explore the potential usefulness of explaining quantitative fairness metrics we consider a simple simulated scenario based on credit underwriting. In our simulation there are four underlying factors that drive the risk of default for a loan: income stability, income amount, spending restraint, and consistency. These underlying factors are not observed, but they variously influence four different observable features: job history, reported income, credit inquiries, and late payments. Using this simulation we generate random samples and then train a non-linear [XGBoost](https://xgboost.ai/) classifier to predict the probability of default. The same process also works for any other model type supported by SHAP, just remember that explanations of more complicated models hide more of the model's details.By introducing sex-specific reporting errors into a fully specified simulation we can observe how the biases caused by these errors are captured by fairness metrics. In our simulated case we can appropriately use the demographic parity metric because in our simulation the true labels (will default on a loan) are statistically independent of sex (the sensitive class we use to check for fairness). If the true labels you are predicting (which might be different than the training labels you have access to) are not (marginally) statistically independent of the sensitive feature you are considering, then even a perfect model with no errors would fail demographic parity, and so you will likely want to consider other measures of fairness.<!--This article explores how we can use modern explainable AI tools to enhance traditional quantitative measures of model fairness. It is practical and hands-on, so feel free to follow along in the associated [notebook]. I assume you have a basic understanding of how people measure fairness for machine learning models. 
If you have never before considered fairness in the context of machine learning, then I recommend starting with a basic introduction such as XXX. I am not writing this Here I do not beforeIt is not meant to be a definitite One futher disclaimer is that as the author of SHAP (a popular explainable AI tool) I am very familar with the strengths and weaknesses of explainable AI tools, but I do not consider myself a fairness expert. So consider this a thought-provoking guide on how explainable AI tools can enhance quantitative measures of model fairnessI consider myself fairly well informed about explainable AI, but I Questions about fairness and equal treatment naturally arise whenever the outputs of a machine learning model impact people. For sensitive use-cases such as credit underwriting or crime prediction there are even laws that govern certain aspects of fairness. While fairness issues are not new, the rising popularily of machine learning model Legal fairness protections are even legally encorced for sensitive use-cases such as credit underwriting or crime prediction, but is also important in many other situations such as quality of service, or you might not initially to consider whenever you are using m Quantifying the fairness of a machine learning model has recently received considerable attention in the research community, and many quantitative fairness metrics have been proposed. In parallel to this work on fairness, explaining the outputs of a machine learning model has also received considerable research attention. %Explainability is intricately connected to fairness, since good explanations enable users to understand a model's behavior and so judge its fairness.Here we connect explainability methods with fairness measures and show how recent explainability methods can enhance the usefulness of quantitative fairness metrics by decomposing them among the model's input features. Explaining quantitative fairness metrics can reduce our tendency to rely on them as opaque standards of fairness, and instead promote their informed use as tools for understanding model behavior between groups. This notebook explores how SHAP can be used to explain quantitative measures of fairness, and so enhance their usefulness. To do this we consider a simple simulated scenario based on credit underwriting. In the simulation below there are four underlying factors that drive the risk of default for a loan: income stability, income amount, spending restraint, and consistency. These underlying factors are not observed, but they influence four different observable features in various ways: job history, reported income, credit inquiries, and late payments. Using this simulation we generate random samples and then train a non-linear gradient boosting tree classifier to predict the probability of default.By introducing sex-specific reporting errors into the simulation we can observe how the biases caused by these errors are captured by fairness metrics. For this analysis we use the classic statistical parity metric, though the same analysis works with other metrics. Note that for a more detailed description of fairness metrics you can check out the [fairlearn package's documentation](https://github.com/fairlearn/fairlearn/blob/master/TERMINOLOGY.mdfairness-of-ai-systems).-->
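In pseudo-code, the decomposition rests on nothing more than the additivity of SHAP values. The sketch below is illustrative only (the `group` mask and array names are placeholders, not variables from the code that follows):

```python
# Demographic parity difference of the full model, for a boolean group mask `group`:
#   DPD(model) = mean(output[group]) - mean(output[~group])
# SHAP additivity guarantees: output = expected_value + sum_j shap_values[:, j]
# Taking group means on both sides (the constant expected_value cancels):
#   DPD(model) = sum_j ( mean(shap_values[group, j]) - mean(shap_values[~group, j]) )
# so each term in the sum is the demographic parity difference "owned" by feature j.
```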
###Code
# here we define a function that we can call to execute our simulation under
# a variety of different alternative scenarios
import scipy as sp
import numpy as np
import matplotlib.pyplot as pl
import pandas as pd
import shap
%config InlineBackend.figure_format = 'retina'
def run_credit_experiment(N, job_history_sex_impact=0, reported_income_sex_impact=0, income_sex_impact=0,
late_payments_sex_impact=0, default_rate_sex_impact=0,
include_brandx_purchase_score=False, include_sex=False):
np.random.seed(0)
sex = np.random.randint(0, 2, N) == 1 # randomly half men and half women
# four hypothetical causal factors influence customer quality
# they are all scaled to the same units between 0-1
income_stability = np.random.rand(N)
income_amount = np.random.rand(N)
if income_sex_impact > 0:
income_amount -= income_sex_impact/90000 * sex * np.random.rand(N)
income_amount -= income_amount.min()
income_amount /= income_amount.max()
spending_restraint = np.random.rand(N)
consistency = np.random.rand(N)
# intuitively this product says that high customer quality comes from simultaneously
# being strong in all factors
customer_quality = income_stability * income_amount * spending_restraint * consistency
# job history is a random function of the underlying income stability feature
job_history = np.maximum(
10 * income_stability + 2 * np.random.rand(N) - job_history_sex_impact * sex * np.random.rand(N)
, 0)
# reported income is a random function of the underlying income amount feature
reported_income = np.maximum(
10000 + 90000*income_amount + np.random.randn(N) * 10000 - \
reported_income_sex_impact * sex * np.random.rand(N)
, 0)
# credit inquiries is a random function of the underlying spending restraint and income amount features
credit_inquiries = np.round(6 * np.maximum(-spending_restraint + income_amount, 0)) + \
np.round(np.random.rand(N) > 0.1)
    # late payments is a random function of the underlying consistency and income stability features
late_payments = np.maximum(
np.round(3 * np.maximum((1-consistency) + 0.2 * (1-income_stability), 0)) + \
np.round(np.random.rand(N) > 0.1) - np.round(late_payments_sex_impact * sex * np.random.rand(N))
, 0)
# bundle everything into a data frame and define the labels based on the default rate and customer quality
X = pd.DataFrame({
"Job history": job_history,
"Reported income": reported_income,
"Credit inquiries": credit_inquiries,
"Late payments": late_payments
})
default_rate = 0.40 + sex * default_rate_sex_impact
y = customer_quality < np.percentile(customer_quality, default_rate * 100)
if include_brandx_purchase_score:
brandx_purchase_score = sex + 0.8 * np.random.randn(N)
X["Brand X purchase score"] = brandx_purchase_score
if include_sex:
X["Sex"] = sex + 0
# build model
import xgboost
model = xgboost.XGBClassifier(max_depth=1, n_estimators=500, subsample=0.5, learning_rate=0.05)
model.fit(X, y)
# build explanation
import shap
explainer = shap.TreeExplainer(model, shap.sample(X, 100))
shap_values = explainer.shap_values(X)
return shap_values, sex, X, explainer.expected_value
###Output
_____no_output_____
###Markdown
<!-- Scenario A: No reporting errorsAs a baseline experiment we refrain from introducing any sex-specific reporting errors. This results in no significant statistical parity difference between the credit score of men and women:--> Scenario A: No reporting errorsOur first experiment is a simple baseline check where we refrain from introducing any sex-specific reporting errors. While we could use any model output to measure demographic parity, we use the continuous log-odds score from a binary XGBoost classifier. As expected, this baseline experiment results in no significant demographic parity difference between the credit scores of men and women. We can see this by plotting the difference between the average credit score for women and men as a bar plot and noting that the difference falls within the margin of error of zero (note that negative values mean women have a lower average predicted risk than men, and positive values mean that women have a higher average predicted risk than men):
###Code
N = 10000
shap_values_A, sex_A, X_A, ev_A = run_credit_experiment(N)
model_outputs_A = ev_A + shap_values_A.sum(1)
glabel = "Demographic parity difference\nof model output for women vs. men"
xmin = -0.8
xmax = 0.8
shap.group_difference_plot(shap_values_A.sum(1), sex_A, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
_____no_output_____
###Markdown
Now we can use SHAP to decompose the model output among each of the model's input features and then compute the demographic parity difference on the component attributed to each feature. As noted above, because the SHAP values sum up to the model's output, the sum of the demographic parity differences of the SHAP values for each feature equals the demographic parity difference of the whole model. This means that the sum of the bars below equals the difference between the two bars above (the demographic parity difference of our baseline scenario model).
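To make this additivity concrete, here is a quick numerical check we could run once the cell above has executed (a minimal sketch using the `model_outputs_A`, `shap_values_A`, and `sex_A` arrays computed there):

```python
# Demographic parity difference of the full model output
overall_diff = model_outputs_A[sex_A].mean() - model_outputs_A[~sex_A].mean()
# Per-feature demographic parity differences computed on the SHAP values
per_feature_diff = shap_values_A[sex_A].mean(0) - shap_values_A[~sex_A].mean(0)
# The per-feature differences sum to the overall difference
assert np.isclose(per_feature_diff.sum(), overall_diff)
```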
###Code
slabel = "Demographic parity difference\nof SHAP values for women vs. men"
shap.group_difference_plot(shap_values_A, sex_A, X_A.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Scenario B: An under-reporting bias for women's incomeIn our baseline scenario we designed a simulation where sex had no impact on any of the features or labels used by the model. Here in scenario B we introduce an under-reporting bias for women's income into the simulation. The point here is not how realistic it would be for women's income to be under-reported in the real-world, but rather how we can identify that a sex-specific bias has been introduced and understand where it came from. By plotting the average model output (credit score) for both men and women we can see that the income under-reporting bias has created a significant demographic parity difference:
###Code
shap_values_B, sex_B, X_B, ev_B = run_credit_experiment(N, reported_income_sex_impact=30000)
model_outputs_B = ev_B + shap_values_B.sum(1)
shap.group_difference_plot(shap_values_B.sum(1), sex_B, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
_____no_output_____
###Markdown
If this were a real application, this demographic parity difference might trigger an in-depth analysis of the model to determine what might be causing the disparity. While this investigation is challenging given just a single demographic parity difference value, it is much easier given the per-feature statistical parity decomposition based on SHAP. Using SHAP we can see there is a significant bias coming from the reported income feature that is increasing the risk of women disproportionately to men. Using the SHAP explanation of the demographic parity difference we can quickly identify which feature has the reporting bias that is causing our model to violate demographic parity:
###Code
shap.group_difference_plot(shap_values_B, sex_B, X_B.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
It is important to note at this point how our assumptions can impact the interpretation of SHAP fairness explanations. In our simulated scenario we know that women actually have identical income profiles to men, so when we see that the reported income feature is biased lower for women than for men, we know that has come from a bias in the measurement errors in the reported income feature. The best way to address this problem would be to figure out how to debias the measurement errors in the reported income feature. Doing so would create a more accurate model that also has less demographic disparity. However, if we instead assume that women actually are making less money than men (and it is not just a reporting error), then we can't just "fix" the reported income feature. Instead we have to carefully consider how best to account for real differences in default risk between two protected groups. It is impossible to determine which of these two situations is happening using just the SHAP fairness explanation, since in both cases the reported income feature will be responsible for an observed disparity between risks for men and women.
###Code
shap_values_B2, sex_B2, X_B2, ev_B2 = run_credit_experiment(N, income_sex_impact=30000)
model_outputs_B2 = ev_B2 + shap_values_B2.sum(1)
shap.group_difference_plot(shap_values_B2.sum(1), sex_B2, xmin=xmin, xmax=xmax, xlabel=glabel)
shap.group_difference_plot(shap_values_B2, sex_B2, X_B2.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Scenario C: An under-reporting bias for women's late paymentsTo verify that SHAP demographic parity explanations can correctly detect disparities regardless of the direction of effect or source feature, we repeat our previous experiment but instead of an under-reporting bias for income, we introduce an under-reporting bias for women's late payment rates. This results in a significant demographic parity difference for the model's output:
###Code
shap_values_C, sex_C, X_C, ev_C = run_credit_experiment(N, late_payments_sex_impact=2)
model_outputs_C = ev_C + shap_values_C.sum(1)
shap.group_difference_plot(shap_values_C.sum(1), sex_C, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
_____no_output_____
###Markdown
And as we would hope, the SHAP explanations correctly highlight the late payments feature as the cause of the model's demographic parity difference, as well as the direction of the effect (in this case under-reporting late payments lowers women's predicted risk of default compared to men):
###Code
shap.group_difference_plot(shap_values_C, sex_C, X_C.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Scenario D: An under-reporting bias for women's default ratesThe experiments above focused on introducing reporting errors for specific input features. Next we consider what happens when we introduce reporting errors on the training labels through an under-reporting bias on women's default rates (meaning real defaults are less likely to be reported for women than men). Interestingly, for our simulated scenario this results in no significant demographic parity differences in the model's output:
###Code
shap_values_D, sex_D, X_D, ev_D = run_credit_experiment(N, default_rate_sex_impact=-0.1) # lowers women's observed default rate from 40% to 30%
model_outputs_D = ev_D + shap_values_D.sum(1)
shap.group_difference_plot(shap_values_D.sum(1), sex_D, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
_____no_output_____
###Markdown
We also see no evidence of any demographic parity differences in the SHAP explanations.
###Code
shap.group_difference_plot(shap_values_D, sex_D, X_D.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Scenario E: An under-reporting bias for women's default rates, take 2It may at first be surprising that no statistical parity differences were caused when we introduced an under-reporting bias on women's default rates. This is because none of the four features in our simulation are significantly correlated with sex, so none of them could be effectively used to model the bias we introduced into the training labels. If we now instead provide a new feature (brand X purchase score) to the model that is correlated with sex, then we see a statistical parity difference emerge as that feature is used by the model to capture the sex-specific bias in the training labels:
###Code
shap_values_E, sex_E, X_E, ev_E = run_credit_experiment(
N, default_rate_sex_impact=-0.1, include_brandx_purchase_score=True
)
model_outputs_E = ev_E + shap_values_E.sum(1)
shap.group_difference_plot(shap_values_E.sum(1), sex_E, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
_____no_output_____
###Markdown
When we explain the statistical parity difference with SHAP we see that, as expected, the brand X purchase score feature drives the difference. In this case it is not because we have a bias in how we measure the brand X purchase score feature, but rather because we have a bias in our training label that gets captured by any input features that are sufficiently correlated with sex:
###Code
shap.group_difference_plot(shap_values_E, sex_E, X_E.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Scenario F: Teasing apart multiple under-reporting biasesWhen there is a single cause of reporting bias then both the classic demographic parity test on the model's output, and the SHAP explanation of the demographic parity test capture the same bias effect (though the SHAP explanation can often have more statistical significance since it isolates the feature causing the bias). But what happens when there are multiple causes of bias occurring in a dataset? In this experiment we introduce two such biases, an under-reporting of women's default rates, and an under-reporting of women's job history. These biases tend to offset each other in the global average and so the classic statistical parity test on the model's output shows no measurable disparity:
###Code
shap_values_F, sex_F, X_F, ev_F = run_credit_experiment(
N, default_rate_sex_impact=-0.1, include_brandx_purchase_score=True,
job_history_sex_impact=2
)
model_outputs_F = ev_F + shap_values_F.sum(1)
shap.group_difference_plot(shap_values_F.sum(1), sex_F, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
_____no_output_____
###Markdown
However, if we look at the SHAP explanation of the statistical parity difference we clearly see both (counteracting) biases:
###Code
shap.group_difference_plot(shap_values_F, sex_F, X_F.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____
###Markdown
Identifying multiple potentially offsetting bias effects can be important since while on average there is no disparate impact on men or women, there is disparate impact on individuals. For example, in this simulation women who have not shopped at brand X will receive a lower credit score than they should have because of the bias present in job history reporting. How introducing a protected feature can help distinguish between label bias and feature biasIn scenario F we were able to pick apart two distinct forms of bias, one coming from job history under-reporting and one coming from default rate under-reporting. However, the bias from default rate under-reporting was not attributed to the default rate label, but rather to the brand X purchase score feature that happened to be correlated with sex. This still leaves us with some uncertainty about the true sources of statistical parity differences, since any difference attributed to an input feature could be due to an issue with that feature, or due to an issue with the training labels.It turns out that in this case we can help disentangle label bias from feature bias by introducing sex as a variable directly into the model. The goal of introducing sex as an input feature is to cause the label bias to fall entirely on the sex feature, leaving the feature biases untouched. So we can then distinguish between label biases and feature biases by comparing the results of scenario F above to our new scenario G. This of course creates an even stronger demographic parity difference than we had before, but that is fine since our goal here is not bias mitigation, but rather bias understanding.
###Code
shap_values_G, sex_G, X_G, ev_G = run_credit_experiment(
N, default_rate_sex_impact=-0.1, include_brandx_purchase_score=True,
job_history_sex_impact=2, include_sex=True
)
model_outputs_G = ev_G + shap_values_G.sum(1)
shap.group_difference_plot(shap_values_G.sum(1), sex_G, xmin=xmin, xmax=xmax, xlabel=glabel)
###Output
99%|===================| 9941/10000 [00:11<00:00]
###Markdown
The SHAP explanation for scenario G shows that all of the demographic parity difference that used to be attached to the brand X purchase score feature in scenario F has now moved to the sex feature, while none of the demographic parity difference attached to the job history feature in scenario F has moved. This can be interpreted to mean that all of the disparity attributed to brand X purchase score in scenario F was due to label bias, while all of the disparity attributed to job history in scenario F was due to feature bias.
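One way to see this movement directly is to tabulate the per-feature parity differences for scenarios F and G side by side. This is a sketch using the arrays computed earlier; features absent from a scenario appear as NaN:

```python
diff_F = pd.Series(shap_values_F[sex_F].mean(0) - shap_values_F[~sex_F].mean(0), index=X_F.columns)
diff_G = pd.Series(shap_values_G[sex_G].mean(0) - shap_values_G[~sex_G].mean(0), index=X_G.columns)
pd.concat([diff_F, diff_G], axis=1, keys=["Scenario F", "Scenario G"])
```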
###Code
shap.group_difference_plot(shap_values_G, sex_G, X_G.columns, xmin=xmin, xmax=xmax, xlabel=slabel)
###Output
_____no_output_____ |
Random_Forest_Models.ipynb | ###Markdown
###Code
# Pandas is used for data manipulation
import pandas as pd
# Read in data and display first 5 rows
features = pd.read_csv('temps.csv')
features.head(5)
###Output
_____no_output_____
###Markdown
Identify Anomalies/ Missing Data
###Code
print('The shape of our features is:', features.shape)
###Output
The shape of our features is: (348, 12)
###Markdown
To identify anomalies, we can quickly compute summary statistics.
###Code
# Descriptive statistics for each column
features.describe()
###Output
_____no_output_____
###Markdown
One-Hot Encoding
###Code
# One-hot encode the data using pandas get_dummies
features = pd.get_dummies(features)
# Display the first 5 rows of the last 12 columns
features.iloc[:,5:].head(5)
## Convert the DataFrame to a NumPy array directly via .values and inspect it
print(type(features.values))
print((features.values.ndim))
print((features.values.shape))
###Output
<class 'numpy.ndarray'>
2
(348, 18)
###Markdown
Features and Targets and Convert Data to Arrays
###Code
# Use numpy to convert to arrays
import numpy as np
# Labels are the values we want to predict
labels = np.array(features['actual'])
# Remove the labels from the features
# axis 1 refers to the columns
features= features.drop('actual', axis = 1)
# Saving feature names for later use
feature_list = list(features.columns)
# Convert to numpy array
features = np.array(features)
features
print(type(features))
print((features.ndim))
print((features.shape))
###Output
<class 'numpy.ndarray'>
2
(348, 17)
###Markdown
Training and Testing Sets
###Code
# Using Skicit-learn to split data into training and testing sets
from sklearn.model_selection import train_test_split
# Split the data into training and testing sets
train_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size = 0.25, random_state = 42)
print('Training Features Shape:', train_features.shape)
print('Training Labels Shape:', train_labels.shape)
print('Testing Features Shape:', test_features.shape)
print('Testing Labels Shape:', test_labels.shape)
###Output
Training Features Shape: (261, 17)
Training Labels Shape: (261,)
Testing Features Shape: (87, 17)
Testing Labels Shape: (87,)
###Markdown
Establish Baseline
###Code
# The baseline predictions are the historical averages
baseline_preds = test_features[:, feature_list.index('average')]
# Baseline errors, and display average baseline error
baseline_errors = abs(baseline_preds - test_labels)
print('Average baseline error: ', round(np.mean(baseline_errors), 2))
###Output
Average baseline error: 5.06
###Markdown
Train Model
###Code
# Import the model we are using
from sklearn.ensemble import RandomForestRegressor
# Instantiate model with 1000 decision trees
rf = RandomForestRegressor(n_estimators = 1000, random_state = 42)
# Train the model on training data
rf.fit(train_features, train_labels);
###Output
_____no_output_____
###Markdown
Make Predictions on the Test Set
###Code
# Use the forest's predict method on the test data
predictions = rf.predict(test_features)
# Calculate the absolute errors
errors = abs(predictions - test_labels)
# Print out the mean absolute error (mae)
print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.')
###Output
Mean Absolute Error: 3.87 degrees.
###Markdown
Determine Performance Metrics
###Code
# Calculate mean absolute percentage error (MAPE)
mape = 100 * (errors / test_labels)
# Calculate and display accuracy
accuracy = 100 - np.mean(mape)
print('Accuracy:', round(accuracy, 2), '%.')
###Output
Accuracy: 93.93 %.
###Markdown
Improve Model if Necessary Interpret Model and Report Results Visualizing a Single Decision Tree
###Code
# Import tools needed for visualization
from sklearn.tree import export_graphviz
import pydot
# Pull out one tree from the forest
tree = rf.estimators_[5]
# Export the image to a dot file
export_graphviz(tree, out_file = 'tree.dot', feature_names = feature_list, rounded = True, precision = 1)
# Use dot file to create a graph
(graph, ) = pydot.graph_from_dot_file('tree.dot')
# Write graph to a png file
graph.write_png('tree.png')
###Output
_____no_output_____
###Markdown
Here is the reduced size tree annotated with labels
###Code
# Limit depth of tree to 3 levels
rf_small = RandomForestRegressor(n_estimators=10, max_depth = 3)
rf_small.fit(train_features, train_labels)
# Extract the small tree
tree_small = rf_small.estimators_[5]
# Save the tree as a png image
export_graphviz(tree_small, out_file = '/content/small_tree.dot', feature_names = feature_list, rounded = True, precision = 1)
(graph, ) = pydot.graph_from_dot_file('/content/small_tree.dot')
graph.write_png('/content/small_tree.png');
###Output
_____no_output_____
###Markdown
Variable Importances
###Code
# Get numerical feature importances
importances = list(rf.feature_importances_)
# List of tuples with variable and importance
feature_importances = [(feature, round(importance, 2)) for feature, importance in zip(feature_list, importances)]
# Sort the feature importances by most important first
feature_importances = sorted(feature_importances, key = lambda x: x[1], reverse = True)
# Print out the feature and importances
[print('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances];
# New random forest with only the two most important variables
rf_most_important = RandomForestRegressor(n_estimators= 1000, random_state=42)
# Extract the two most important features
important_indices = [feature_list.index('temp_1'), feature_list.index('average')]
train_important = train_features[:, important_indices]
test_important = test_features[:, important_indices]
# Train the random forest
rf_most_important.fit(train_important, train_labels)
# Make predictions and determine the error
predictions = rf_most_important.predict(test_important)
errors = abs(predictions - test_labels)
# Display the performance metrics
print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.')
mape = np.mean(100 * (errors / test_labels))
accuracy = 100 - mape
print('Accuracy:', round(accuracy, 2), '%.')
###Output
Mean Absolute Error: 3.92 degrees.
Accuracy: 93.76 %.
###Markdown
Visualizations
###Code
# Import matplotlib for plotting and use magic command for Jupyter Notebooks
import matplotlib.pyplot as plt
%matplotlib inline
# Set the style
plt.style.use('fivethirtyeight')
# list of x locations for plotting
x_values = list(range(len(importances)))
# Make a bar chart
plt.bar(x_values, importances, orientation = 'vertical')
# Tick labels for x axis
plt.xticks(x_values, feature_list, rotation='vertical')
# Axis labels and title
plt.ylabel('Importance'); plt.xlabel('Variable'); plt.title('Variable Importances');
###Output
_____no_output_____
###Markdown
Plot the entire dataset with predictions highlighted.
###Code
# Use datetime for creating date objects for plotting
import datetime
# Dates of training values
months = features[:, feature_list.index('month')]
days = features[:, feature_list.index('day')]
years = features[:, feature_list.index('year')]
# List and then convert to datetime object
dates = [str(int(year)) + '-' + str(int(month)) + '-' + str(int(day)) for year, month, day in zip(years, months, days)]
dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in dates]
# Dataframe with true values and dates
true_data = pd.DataFrame(data = {'date': dates, 'actual': labels})
# Dates of predictions
months = test_features[:, feature_list.index('month')]
days = test_features[:, feature_list.index('day')]
years = test_features[:, feature_list.index('year')]
# Column of dates
test_dates = [str(int(year)) + '-' + str(int(month)) + '-' + str(int(day)) for year, month, day in zip(years, months, days)]
# Convert to datetime objects
test_dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in test_dates]
# Dataframe with predictions and dates
predictions_data = pd.DataFrame(data = {'date': test_dates, 'prediction': predictions})
# Plot the actual values
plt.plot(true_data['date'], true_data['actual'], 'b-', label = 'actual')
# Plot the predicted values
plt.plot(predictions_data['date'], predictions_data['prediction'], 'ro', label = 'prediction')
plt.xticks(rotation = '60');
plt.legend()
# Graph labels
plt.xlabel('Date'); plt.ylabel('Maximum Temperature (F)'); plt.title('Actual and Predicted Values');
###Output
_____no_output_____
###Markdown
A little bit of work for a nice-looking graph! It doesn’t look as if we have any noticeable outliers that need to be corrected.
###Markdown
Lift chart
###Code
### lift chart
# Regression chart.
def chart_regression(pred, y, sort=True):
t = pd.DataFrame({'pred': pred, 'y': y})
#t = pd.DataFrame({'pred': pred, 'y': y.flatten()})
if sort:
t.sort_values(by=['y'], inplace=True)
plt.plot(t['y'].tolist(), label='expected')
plt.plot(t['pred'].tolist(), label='prediction')
plt.ylabel('output')
plt.legend()
plt.show()
# Plot the lift chart, comparing test-set predictions against the actual test labels
chart_regression(predictions, test_labels)
predictions = rf_most_important.predict(test_important)
predictions
###Output
_____no_output_____ |
gremlin/property-graph-data-modelling/notebooks/data-model-3.ipynb | ###Markdown
Use Case 3 Find the people in more senior roles at the companies where X worked What questions would we have to ask of our data? _"Who were in senior roles at the companies where X worked?"_Once again, we're looking at roles. But now we see there are some structural relations between roles – a role hierarchy. Structural relations are best modelled using edges. Therefore, we will now promote role to become a vertex: With role now a vertex, our overall data model looks like this:
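As a quick preview of what this promotion buys us (a sketch only; `g` is the traversal source created in the next cell), a person's role names are now reached by hopping two edges, and the hierarchy itself can be walked along `PARENT_ROLE` edges:

```python
# Roles a person has held (two hops: Person -JOB-> Job -ROLE-> Role)
g.V('p-3').out('JOB').out('ROLE').values('name').toList()

# Walking up the role hierarchy from a given role
g.V('r-1').repeat(out('PARENT_ROLE')).emit().values('name').toList()
```

Sample dataset Creating some revised sample data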
###Code
%load_ext ipython_unittest
%run '../util/neptune.py'
neptune.clear()
g = neptune.graphTraversal()
from datetime import *
(g.
addV('Person').property(id,'p-1').property('firstName','Martha').property('lastName','Rivera').
addV('Person').property(id,'p-2').property('firstName','Richard').property('lastName','Roe').
addV('Person').property(id,'p-3').property('firstName','Li').property('lastName','Juan').
addV('Person').property(id,'p-4').property('firstName','John').property('lastName','Stiles').
addV('Person').property(id,'p-5').property('firstName','Saanvi').property('lastName','Sarkar').
addV('Role').property(id,'r-1').property('name','Analyst').
addV('Role').property(id,'r-2').property('name','Senior Analyst').
addV('Role').property(id,'r-3').property('name','Principal Analyst').
addV('Role').property(id,'r-4').property('name','Associate Analyst').
addV('Role').property(id,'r-5').property('name','Manager').
addV('Company').property(id,'c-1').property('name','Example Corp').
addV('Company').property(id,'c-2').property('name','AnyCompany').
addV('Location').property(id,'l-1').property('name','HQ').property('address','100 Main St, Anytown').
addV('Location').property(id,'l-2').property('name','Offices').property('address','Downtown, Anytown').
addV('Location').property(id,'l-3').property('name','Exchange').property('address','50 High St, Anytown').
addV('Job').property(id,'j-1').property('from',datetime(2010,10,20)).property('to',datetime(2017,11,1)).
addV('Job').property(id,'j-2').property('from',datetime(2011,2,16)).property('to',datetime(2013,9,17)).
addV('Job').property(id,'j-3').property('from',datetime(2013,11,21)).property('to',datetime(2016,3,23)).
addV('Job').property(id,'j-4').property('from',datetime(2015,2,2)).property('to',datetime(2018,2,8)).
addV('Job').property(id,'j-5').property('from',datetime(2011,7,15)).property('to',datetime(2017,10,14)).
addV('Job').property(id,'j-6').property('from',datetime(2012,3,23)).property('to',datetime(2013,11,1)).
V('r-1').addE('PARENT_ROLE').to(V('r-2')).
V('r-2').addE('PARENT_ROLE').to(V('r-3')).
V('r-4').addE('PARENT_ROLE').to(V('r-5')).
V('c-1').addE('LOCATION').to(V('l-1')).
V('c-1').addE('LOCATION').to(V('l-2')).
V('c-2').addE('LOCATION').to(V('l-3')).
V('p-1').addE('JOB').to(V('j-1')).
V('j-1').addE('ROLE').to(V('r-3')).
V('j-1').addE('COMPANY').to(V('c-1')).
V('j-1').addE('LOCATION').to(V('l-1')).
V('p-2').addE('JOB').to(V('j-2')).
V('j-2').addE('ROLE').to(V('r-2')).
V('j-2').addE('COMPANY').to(V('c-1')).
V('j-2').addE('LOCATION').to(V('l-2')).
V('p-3').addE('JOB').to(V('j-3')).
V('j-3').addE('ROLE').to(V('r-1')).
V('j-3').addE('COMPANY').to(V('c-1')).
V('j-3').addE('LOCATION').to(V('l-1')).
V('p-4').addE('JOB').to(V('j-4')).
V('j-4').addE('ROLE').to(V('r-1')).
V('j-4').addE('COMPANY').to(V('c-1')).
V('j-4').addE('LOCATION').to(V('l-2')).
V('p-5').addE('JOB').to(V('j-5')).
V('j-5').addE('ROLE').to(V('r-5')).
V('j-5').addE('COMPANY').to(V('c-2')).
V('j-5').addE('LOCATION').to(V('l-3')).
V('p-3').addE('JOB').to(V('j-6')).
V('j-6').addE('ROLE').to(V('r-4')).
V('j-6').addE('COMPANY').to(V('c-2')).
V('j-6').addE('LOCATION').to(V('l-3')).
toList())
###Output
_____no_output_____
###Markdown
Querying the data Query 3 – Who were in senior roles at the companies where Li worked? To answer this question, we'll have to perform the following steps: 1. Start at the Person's vertex 2. Follow JOB and ROLE edges to Roles 3. Traverse up Role hierarchy 4. For each parent Role: - Get associated Jobs - Filter Jobs by date - Get Role and Person details for each Job
###Code
%%unittest
results = (g.V('p-3').out('JOB').as_('j1'). # traverse from Person to each Job
out('ROLE'). # traverse to job's Role
repeat(out('PARENT_ROLE')).until(outE().count().is_(0)). # climb the Role hierarchy...
emit().in_('ROLE').as_('j2'). # for each Role encountered in the hierarchy, traverse to the Jobs associated with that role
or_(
(where('j1', between('j2', 'j2')).by('from').by('from').by('to')), # filter based on the dates of the original Job (j1)
(where('j1', between('j2', 'j2')).by('to').by('from').by('to')),
(where('j1', lte('j2').and_(gt('j2'))).by('from').by('from').by('to').by('from'))
).
order().by(id).
project('role', 'name'). # for each Job emitted from above
by(out('ROLE').values('name')). # get Role
by(in_('JOB').values('firstName', 'lastName').fold()). # get Person's name
toList())
assert results == [{'role': 'Principal Analyst', 'name': ['Martha', 'Rivera']},
{'role': 'Manager', 'name': ['Saanvi', 'Sarkar']}]
###Output
_____no_output_____
###Markdown
Query 2 – Who worked for Example Corp, and at which locations, between 2015-2017?
###Code
%%unittest
results = (g.
V('c-1').in_('COMPANY').
or_(
(has('from', between(datetime(2015,1,1), datetime(2018,1,1)))),
(has('to', between(datetime(2015,1,1), datetime(2018,1,1))))
).
order().by(id).
project('name', 'location').
by(in_('JOB').values('firstName', 'lastName').fold()).
by(out('LOCATION').values('name', 'address').fold()).
toList())
assert results == [{'name': ['Martha', 'Rivera'], 'location': ['HQ', '100 Main St, Anytown']},
{'name': ['Li', 'Juan'], 'location': ['HQ', '100 Main St, Anytown']},
{'name': ['John', 'Stiles'], 'location': ['Offices', 'Downtown, Anytown']}]
###Output
_____no_output_____
###Markdown
Broken tests Once again we've broken the test for Query 1. Not surprising given that role has been promoted to a vertex. Query 1 [BROKEN] – Which companies has Li worked for, and in what roles?
###Code
%%unittest
results = (g.V('p-3').
out('JOB').
project('company', 'role').
by(out('COMPANY').values('name')).
by('role').
toList())
assert results == [{'company': 'Example Corp', 'role': 'Analyst'},
{'company': 'AnyCompany', 'role': 'Associate Analyst'}]
###Output
_____no_output_____
###Markdown
Query 1 (revised) – Which companies has Li worked for, and in what roles?
###Code
%%unittest
results = (g.V('p-3').
out('JOB').
project('company', 'role').
by(out('COMPANY').values('name')).
by(out('ROLE').values('name')).
toList())
assert results == [{'company': 'Example Corp', 'role': 'Analyst'},
{'company': 'AnyCompany', 'role': 'Associate Analyst'}]
###Output
_____no_output_____ |
api-book/_build/html/_sources/chapter-7-final-serving/machine_learning_model.ipynb | ###Markdown
ML model Now that we have set up user creation and authentication, we need our main selling point - the machine learning model. We will create an ML model that outputs how likely a person is to survive after a heart attack, given a set of features {cite}`heart_disease`.The data source can be found here: https://www.kaggle.com/andrewmvd/heart-failure-clinical-data Python packages
###Code
# Data wrangling
import pandas as pd
# Xgboost model
import xgboost as xgb
# Directory traversal
import os
# Classifier accuracy
from sklearn.metrics import roc_auc_score
# Plotting
import matplotlib.pyplot as plt
# Train test split
from sklearn.model_selection import train_test_split
# Array math
import numpy as np
# Model saving
import pickle
import json
# Cross validation split
from sklearn.model_selection import KFold
# Parameter grid creation
from sklearn.model_selection import ParameterGrid
###Output
_____no_output_____
###Markdown
Data input
###Code
# Input data
d = pd.read_csv('ML_API/ml_input/heart_failure_clinical_records_dataset.csv')
print(f"Shape of data: {d.shape}")
# Listing out the columns
print(f"Columns:\n{d.columns.values}")
# Printing out the head of data
print(f"Head of data:\n{d.head()}")
###Output
Shape of data: (299, 13)
Columns:
['age' 'anaemia' 'creatinine_phosphokinase' 'diabetes' 'ejection_fraction'
'high_blood_pressure' 'platelets' 'serum_creatinine' 'serum_sodium' 'sex'
'smoking' 'time' 'DEATH_EVENT']
Head of data:
age anaemia creatinine_phosphokinase diabetes ejection_fraction \
0 75.0 0 582 0 20
1 55.0 0 7861 0 38
2 65.0 0 146 0 20
3 50.0 1 111 0 20
4 65.0 1 160 1 20
high_blood_pressure platelets serum_creatinine serum_sodium sex \
0 1 265000.00 1.9 130 1
1 0 263358.03 1.1 136 1
2 0 162000.00 1.3 129 1
3 0 210000.00 1.9 137 1
4 0 327000.00 2.7 116 0
smoking time DEATH_EVENT
0 0 4 1
1 0 6 1
2 1 7 1
3 0 7 1
4 0 8 1
###Markdown
Column explanation We will be using the following columns in our model:**DEATH_EVENT** - `boolean` - whether the patient died or not after the heart attack.**age** - `float` - age during heart attack.**anaemia** - `boolean` - decrease of red blood cells.**creatinine_phosphokinase** - `float` - level of the CPK enzyme in the blood (mcg/L).**diabetes** - `boolean` - whether the patient has diabetes or not.**ejection_fraction** - `float` - percentage of blood leaving the heart at each contraction (percentage).**high_blood_pressure** - `boolean` - whether the patient has high blood pressure or not.**platelets** - `float` - platelets in the blood (kiloplatelets/mL).**serum_creatinine** - `float` - serum creatinine level (mg/dL).**serum_sodium** - `float` - serum sodium level (mEq/L).**sex** - `boolean` - woman or man (binary).**smoking** - `boolean` - whether the person smoked or not.
###Code
# Defining the columns and saving for later
y_var = 'DEATH_EVENT'
# Numerics
numeric_columns = [
'age',
'creatinine_phosphokinase',
'ejection_fraction',
'platelets',
'serum_creatinine',
'serum_sodium'
]
# Categoricals
categorical_columns = [
'anaemia',
'diabetes',
'high_blood_pressure',
'sex',
'smoking'
]
###Output
_____no_output_____
###Markdown
EDA Y variable distribution
###Code
# Distribution of the binary variable
counts = d.groupby(y_var, as_index=False).size()
counts['distr'] = counts['size'] / counts['size'].sum()
# Visualizing the distribution
plt.bar(counts['DEATH_EVENT'].astype(str), counts['distr'])
plt.xlabel("DEATH_EVENT")
plt.ylabel("Share of total observations")
plt.title("Distribution of DEATH_EVENT")
###Output
_____no_output_____
###Markdown
The Y variable classes are not perfectly balanced, but we cannot say that they are extremely unbalanced either (in truly imbalanced problems, one class can comprise less than 1% of the data). In our model, we will not be doing any artificial class balancing.
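If we did want to compensate for the imbalance, one lightweight option (shown as a sketch only; we do not use it in this project) would be XGBoost's `scale_pos_weight` parameter, set to the ratio of negative to positive examples:

```python
# Sketch only -- we keep the natural class balance in this project
pos = (d[y_var] == 1).sum()
neg = (d[y_var] == 0).sum()
balanced_model = xgb.XGBClassifier(scale_pos_weight=neg / pos, eval_metric='logloss', use_label_encoder=False)
```

Numeric feature impact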
###Code
## Numeric feature impact
for column in numeric_columns:
plt.figure(figsize=(7, 5))
plt.hist(d.loc[d[y_var] == 0, column], bins=20, alpha=0.5, label='No', density=True)
plt.hist(d.loc[d[y_var] == 1, column], bins=20, alpha=0.5, label='Yes', density=True)
plt.legend()
plt.xlabel(column)
plt.ylabel("Distribution")
plt.title(f"Distribution of {column} by DEATH_EVENT")
###Output
_____no_output_____
###Markdown
There is no clear separation of classes in any of the numeric features. Xgboost model We will do a 5-fold cross-validation hyperparameter search in order to find the best XGB model.
###Code
# Defining the final feature list
final_features = numeric_columns + ['sex', 'high_blood_pressure']
# Defining a list of hyperparameters
hp_dict = {
'n_estimators': [30, 60, 120, 160],
'max_depth': [2, 3, 4, 5],
'learning_rate': [0.1, 0.2, 0.3],
'eval_metric': ['logloss'],
'use_label_encoder': [False],
'min_child_weight': [0.5, 1]
}
# Creating the parameter grid
param_grid = ParameterGrid(hp_dict)
# Initiating the empty placeholder for the AUC scores
auc_scores = []
# Iterating over the parameter grid
for params in param_grid:
# Spliting the data into 5 folds
kf = KFold(n_splits=5, shuffle=True, random_state=3)
# Initiating the empty placeholder for the scores
auc_scores_folds = []
# Iterating over the folds
for train_index, test_index in kf.split(d):
# Splitting the data into training and test sets
train, test = d.iloc[train_index], d.iloc[test_index]
train_X_fold, test_X_fold = train[final_features], test[final_features]
train_y_fold, test_y_fold = train[y_var], test[y_var]
# Creating the XGBoost model
model = xgb.XGBClassifier(**params)
# Fitting the model
model.fit(train_X_fold, train_y_fold)
# Predicting the test set
preds = model.predict_proba(test_X_fold)[:, 1]
# Calculating the AUC score
auc_scores_folds.append(roc_auc_score(test_y_fold, preds))
# Averaging the scores and appending to the master list
auc_scores.append(np.mean(auc_scores_folds))
# Creating the dataframe with the hyperparameters and AUC scores
df_scores = pd.DataFrame(param_grid)
df_scores['mean_auc'] = [round(x, 3) for x in auc_scores]
# Sorting by mean AUC score
df_scores = df_scores.sort_values('mean_auc', ascending=False)
df_scores.reset_index(drop=True, inplace=True)
# Printing the top 10 hyperparameters
print(f"Top 10 hyperparameters:\n{df_scores.head(10)}")
# Saving the best hyper parameters
best_hp = df_scores.iloc[0].to_dict()
del best_hp['mean_auc']
###Output
Top 10 hyperparameters:
eval_metric learning_rate max_depth min_child_weight n_estimators \
0 logloss 0.1 2 0.5 30
1 logloss 0.1 5 1.0 30
2 logloss 0.1 2 1.0 30
3 logloss 0.1 4 1.0 30
4 logloss 0.1 3 0.5 30
5 logloss 0.1 5 1.0 60
6 logloss 0.1 3 1.0 30
7 logloss 0.2 2 1.0 30
8 logloss 0.1 5 0.5 30
9 logloss 0.1 4 0.5 30
use_label_encoder mean_auc
0 False 0.776
1 False 0.774
2 False 0.770
3 False 0.768
4 False 0.767
5 False 0.760
6 False 0.760
7 False 0.759
8 False 0.758
9 False 0.755
###Markdown
Fitting the final model We will be using xgboost as our final model for this project. The final hyperparameters are the top ones from the k-fold analysis.
###Code
# X and Y for the model
X, Y = d[final_features].copy(), d[y_var].values
# Fitting the model
clf = xgb.XGBClassifier(**best_hp)
clf.fit(X, Y)
# Creating the feature importance frame
feature_importance = pd.DataFrame({
'feature': final_features,
'importance': clf.feature_importances_
}).sort_values("importance", ascending=True)
# Ploting
plt.figure(figsize=(7, 5))
plt.barh(feature_importance['feature'], feature_importance['importance'])
plt.ylabel("Feature")
plt.xlabel("Importance")
plt.title("Feature importance")
plt.show()
###Output
_____no_output_____
###Markdown
Saving all the necessary objects In order to successfully serve the model, we need to save all the necessary objects. The objects are: * The model itself. * The input data schema. The input data schema will be saved as a dictionary in a JSON file with the following structure: ```{ "input_schema": { "columns": [ { "name": "column_name", "type": "numeric" }, { "name": "column_name", "type": "boolean" }, ... ] }}```The model will be saved as a pickle file as well.
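For completeness, here is a sketch of how the serving side could load these artifacts back once the cell below has been run (the variable names here are illustrative; the actual API code lives elsewhere in the project):

```python
import json
import pickle

with open('ML_API/ml_model/model.pkl', 'rb') as f:
    clf_loaded = pickle.load(f)
with open('ML_API/ml_model/input_schema.json') as f:
    schema = json.load(f)

# The schema tells the API which columns to expect, and in what order
expected_columns = [c['name'] for c in schema['input_schema']['columns']]
```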
###Code
print(f"Final feature list in the correct order:\n{X.columns.values}")
# Creating the input schema
features = []
for col in X.columns:
if col in numeric_columns:
features.append({
"name": col,
"type": "numeric"
})
else:
features.append({
"name": col,
"type": "boolean"
})
input_schema = {
"input_schema": {
"columns": features
}
}
# Creating the output dir
output_dir = os.path.join("ML_API", "ml_model")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Saving the model to a pickle file
with open(os.path.join(output_dir, "model.pkl"), "wb") as f:
pickle.dump(clf, f)
# Saving the input schema
with open(os.path.join(output_dir, "input_schema.json"), "w") as f:
json.dump(input_schema, f)
###Output
Final feature list in the correct order:
['age' 'creatinine_phosphokinase' 'ejection_fraction' 'platelets'
'serum_creatinine' 'serum_sodium' 'sex' 'high_blood_pressure']
|
sources/curriculum/3_modeling_and_machine_learning/temporal-cross-validation/Temporal cross validation example.ipynb | ###Markdown
Here's the shell for a simple temporal cross validation loop. It's based on Rayid's [magicloops](https://github.com/rayidghani/magicloops/blob/master/temporal_validate.py).First, we must set some parameters:
###Code
# start time of our data
start_time = datetime.strptime('2018-01-01', '%Y-%m-%d')
# last date of data including labels
# (this past Sunday!)
end_time = datetime.strptime('2018-06-30', '%Y-%m-%d')
# how far ahead we're predicting, e.g. '1' means the label
# takes a 1 if an event takes place in the next month
# and a 0 otherwise
# unit: months
prediction_windows = [1]
# how often should we score?
# e.g. police might predict a year ahead (prediction window) every day (update window)
# unit: months
update_window = 1
df = pd.read_csv('temporal_CV.csv')
df
###Output
_____no_output_____
###Markdown
convert to date format
###Code
df['jail_entry'] = pd.to_datetime(df['jail_entry'], format = '%m/%d/%y')
###Output
_____no_output_____
###Markdown
Dataframe for storing results
###Code
results = pd.DataFrame(columns=['train_features_start_time', 'train_features_end_time',
'train_label_start_time', 'train_label_end_time',
'test_features_start_time', 'test_features_end_time',
                                'test_label_start_time', 'test_label_end_time',
'accuracy'])
# the last test labels extend to the end of our data
test_label_end_time = end_time
for prediction_window in prediction_windows:
# we'll start at the end to ensure the "freshest" possible data.
# keep looping backward until there isn't enough time for two
# prediction windows. (Remember, the train and test prediction
# windows cannot overlap.)
while (test_label_end_time >= start_time + 2 * relativedelta(months=+prediction_window)):
# the prediction window equals the time between the start and end of the test label
test_label_start_time = test_label_end_time - relativedelta(months=+prediction_window)
# the end of the train label window and test features window should precede the beginning of the test label window
train_label_end_time = test_features_end_time = test_label_start_time - relativedelta(days=+1)
# the prediction window also equals the time between the start and end of the train label
train_label_start_time = train_label_end_time - relativedelta(months=+prediction_window)
# the end of the train features should precede the beginning of the train labels
train_features_end_time = train_label_start_time - relativedelta(days=+1)
# for this example, we'll use all the data back to start_date
train_features_start_time = test_features_start_time = start_time
# only run if there's enough data for a full train label window
        if (train_label_start_time >= start_time):
# It's safer to split the data then generate features and labels
raw_train_X = df[(df.jail_entry >= train_features_start_time) & (df.jail_entry <= train_features_end_time)]
raw_train_y = df[(df.jail_entry >= train_label_start_time) & (df.jail_entry <= train_label_end_time)]
raw_test_X = df[(df.jail_entry >= test_features_start_time) & (df.jail_entry <= test_features_end_time)]
raw_test_y = df[(df.jail_entry >= test_label_start_time) & (df.jail_entry <= test_label_end_time)]
# create the matrices we need
# ensure that each entity is only represented at the appropriate times
# e.g. B should not appear before 2/2/18
# fit on train data
# predict on test data
# calculate accuracy
# write results to the results dataframe
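            # --- a minimal sketch of those remaining steps, kept commented out;
            # `build_matrix` and the choice of model here are hypothetical placeholders ---
            # train_X, train_y = build_matrix(raw_train_X, raw_train_y)
            # test_X, test_y = build_matrix(raw_test_X, raw_test_y)
            # clf = LogisticRegression().fit(train_X, train_y)
            # accuracy = clf.score(test_X, test_y)
            # results.loc[len(results)] = [train_features_start_time, train_features_end_time,
            #                              train_label_start_time, train_label_end_time,
            #                              test_features_start_time, test_features_end_time,
            #                              test_label_start_time, test_label_end_time,
            #                              accuracy]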
test_label_end_time -= relativedelta(months=+update_window)
###Output
_____no_output_____ |
lectures/l22-recurrent-neural-networks.ipynb | ###Markdown
Predicting parts of speech with an LSTMLet's preview the end result. We want to take a sentence and output the part of speech for each word in that sentence. Something like this:**Code**```pythonnew_sentence = "I is a teeth"...predictions = model(processed_sentence)...```**Output**```textI => Nounis => Verba => Determinerteeth => Noun```
###Code
def ps(s):
"""Process String: convert a string into a list of lowercased words."""
return s.lower().split()
# https://parts-of-speech.info/
# Tags:
# D - determiner
# N - noun
# V - verb
dataset = [
(ps("The dog ate the apple"), ["D", "N", "V", "D", "N"]),
(ps("Everybody read that book"), ["N", "V", "D", "N"]),
(ps("Trapp is sleeping"), ["N", "V", "V"]),
(ps("Everybody ate the apple"), ["N", "V", "D", "N"]),
(ps("Cats are good"), ["N", "V", "D"]),
(ps("Dogs are not as good as cats"), ["N", "V", "D", "D", "D", "D", "N"]),
(ps("Dogs eat dog food"), ["N", "V", "N", "N"]),
(ps("Watermelon is the best food"), ["N", "V", "D", "D", "N"]),
(ps("I want a milkshake right now"), ["N", "V", "D", "N", "D", "D"]),
(ps("I have too much homework"), ["N", "V", "D", "D", "N"]),
(ps("Zoom won't work"), ["N", "D", "V"]),
(ps("Pie also sounds good"), ["N", "D", "V", "D"]),
(ps("The college is having the department fair this Friday"), ["D", "N", "V", "V", "D", "N", "N", "D", "N"]),
(ps("Research interests span many areas"), ["N", "N", "V", "D", "N"]),
(ps("Alex is finishing his Ph.D"), ["N", "V", "V", "D", "N"]),
(ps("She is the author"), ["N", "V", "D", "N"]),
(ps("It is almost the end of the semester"), ["N", "V", "D", "D", "N", "D", "D", "N"]),
(ps("Blue is a color"), ["N", "V", "D", "N"]),
(ps("They wrote a book"), ["N", "V", "D", "N"]),
(ps("The syrup covers the pancake"), ["D", "N", "V", "D", "N"]),
(ps("Harrison has these teeth"), ["N", "V", "D", "N"]),
(ps("The numbers are fractions"), ["D", "N", "V", "N"]),
(ps("Yesterday happened"), ["N", "V"]),
(ps("Caramel is sweet"), ["N", "V", "D"]),
(ps("Computers use electricity"), ["N", "V", "N"]),
(ps("Gold is a valuable thing"), ["N", "V", "D", "D", "N"]),
(ps("This extension cord helps"), ["D", "D", "N", "V"]),
(ps("It works on my machine"), ["N", "V", "D", "D", "N"]),
(ps("We have the words"), ["N", "V", "D", "N"]),
(ps("Trapp is a dog"), ["N", "V", "D", "N"]),
(ps("This is a computer"), ["N", "V", "D", "N"]),
(ps("I love lamps"), ["N", "V", "N"]),
(ps("I walked outside"), ["N", "V", "N"]),
(ps("You never bike home"), ["N", "D", "V", "N"]),
(ps("You are a wizard Harry"), ["N", "V", "D", "N", "N"]),
(ps("Trapp ate the shoe"), ["N", "V", "D", "N"]),
(ps("Jett failed his test"), ["N", "V", "D", "N"]),
(ps("Alice won the game"), ["N", "V", "D", "N"]),
(ps("The class lasted a semester"), ["D", "N", "V", "D", "N"]),
(ps("The tree had a branch"), ["D", "N", "V", "D", "N"]),
(ps("I ran a race"), ["N", "V", "D", "N"]),
(ps("The dog barked"), ["D", "N", "V"]),
(ps("Toby hit the wall"), ["N", "V", "D", "N"]),
(ps("Zayn ate an apple"), ["N", "V", "D", "N"]),
(ps("The cat fought the dog"), ["D", "N", "V", "D", "N"]),
(ps("I got an A"), ["N", "V", "D", "N"]),
(ps("The A hurt"), ["D", "N", "V"]),
(ps("I jump"), ["N", "V"]),
(ps("I drank a yerb"), ["N", "V", "D", "N"]),
(ps("The snake ate a fruit"), ["D", "N", "V", "D", "N"]),
(ps("I played the game"), ["N", "V", "D", "N"]),
(ps("I watched a movie"), ["N", "V", "D", "N"]),
(ps("Clark fixed the audio"), ["N", "V", "D", "N"]),
(ps("I went to Frary"), ["N", "V", "D", "N"]),
(ps("I go to Pomona"), ["N", "V", "D", "N"]),
(ps("Food are friends not fish"), ["N", "V", "N", "D", "N"]),
(ps("You are reading this"), ["N", "V", "D", "N"]),
(ps("Wonderland protocol is amazing"), ["D", "N", "V", "D"]),
(ps("This is a sentence"), ["D", "V", "D", "N"]),
(ps("I should be doing homework"), ["N", "V", "V", "V", "N"]),
(ps("Computers are tools"), ["N", "V", "N"]),
(ps("The whale swims"), ["D", "N", "V"]),
(ps("A cup is filled"), ["D", "N", "V", "V"]),
(ps("This is a cat"), ["D", "V", "D", "N"]),
(ps("These are trees"), ["D", "V", "N"]),
(ps("The cat is the teacher"), ["D", "N", "V", "D", "N"]),
(ps("I ate food today"), ["N", "V", "N", "N"]),
(ps("I am a human"), ["N", "V", "D", "N"]),
(ps("The cat sleeps"), ["D", "N", "V"]),
(ps("Whales are mammals"), ["N", "V", "N"]),
(ps("I like turtles"), ["N", "V", "N"]),
(ps("A shark ate me"), ["D", "N", "V", "N"]),
(ps("There are mirrors"), ["D", "V", "N"]),
(ps("The bus spins"), ["D", "N", "V"]),
(ps("Computers are machines"), ["N", "V", "N"]),
]
import torch
from fastprogress.fastprogress import progress_bar, master_bar
from random import shuffle
###Output
_____no_output_____
###Markdown
Preparing data for use as NN inputWe can't pass a list of plain text words and tags to a NN. We need to convert them to a more appropriate format.We'll start by creating a unique index for each word and tag.
###Code
word_to_index = {}
tag_to_index = {}
total_words = 0
total_tags = 0
tag_list = []
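# Build vocabularies: assign each previously unseen word and tag the next free index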
for words, tags in dataset:
assert len(words) == len(tags)
total_words += len(words)
for word in words:
if word not in word_to_index:
word_to_index[word] = len(word_to_index)
total_tags += len(tags)
for tag in tags:
if tag not in tag_to_index:
tag_to_index[tag] = len(tag_to_index)
tag_list.append(tag)
print(" Vocabulary Indices")
print("-------------------------------")
for word in sorted(word_to_index):
print(f"{word:>14} => {word_to_index[word]:>2}")
print("\nTotal number of words:", total_words)
print("Number of unique words:", len(word_to_index))
print("Tag Indices")
print("-----------")
for tag, index in tag_to_index.items():
print(f" {tag} => {index}")
print("\nTotal number of tags:", total_tags)
print("Number of unique tags:", len(tag_to_index))
###Output
Tag Indices
-----------
D => 0
N => 1
V => 2
Total number of tags: 308
Number of unique tags: 3
###Markdown
Letting the NN parameterize wordsOnce we have a unique identifier for each word, it is useful to start our NN with an [embedding](https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html#torch.nn.Embedding) layer. This layer converts an index into a vector of values. You can think of each value as indicating something about the word. For example, maybe the first value indicates how much a word conveys happiness vs sadness. Of course, the NN can learn any attributes and is not limited to things like happy/sad, masculine/feminine, etc. **Creating an embedding layer**. An embedding layer is created by telling it the size of the vocabulary (the number of words) and an embedding dimension (how many values to use to represent a word). **Embedding layer input and output**. An embedding layer takes a tensor of indices and returns a matrix with one embedding vector per index.
###Code
def convert_to_index_tensor(words, mapping):
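    """Map each token to its vocabulary index and return a 1-D LongTensor."""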
indices = [mapping[w] for w in words]
return torch.tensor(indices, dtype=torch.long)
vocab_size = len(word_to_index)
embed_dim = 6 # Hyperparameter
embed_layer = torch.nn.Embedding(vocab_size, embed_dim)
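# Each row of the embedding matrix is the trainable vector for one word index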
# i = torch.tensor([word_to_index["the"], word_to_index["dog"]])
indices = convert_to_index_tensor(ps("The dog ate the apple"), word_to_index)
embed_output = embed_layer(indices)
indices.shape, embed_output.shape, embed_output
###Output
_____no_output_____
###Markdown
Adding an LSTM layerThe [LSTM](https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html#torch.nn.LSTM) layer is in charge of processing embeddings such that the network can output the correct classification. Since this is a recurrent layer, it takes past words into account when it creates an output for the current word. **Creating an LSTM layer**. To create an LSTM you need to tell it the size of its input (the size of an embedding) and the size of its internal cell state. **LSTM layer input and output**. An LSTM takes an embedding (and optionally an initial hidden and cell state) and outputs a value for each word as well as the final hidden and cell states. If you read the linked LSTM documentation you will see that it requires input in the format (seq_len, batch, input_size). As you can see above, our embedding layer outputs something shaped (seq_len, input_size), so we need to add a batch dimension in the middle.
###Code
hidden_dim = 10 # Hyperparameter
num_layers = 5 # Hyperparameter
lstm_layer = torch.nn.LSTM(embed_dim, hidden_dim, num_layers=num_layers)
# The LSTM layer expects the input to be in the shape (L, N, E)
# L is the length of the sequence
# N is the batch size (we'll stick with 1 here)
# E is the size of the embedding
lstm_output, _ = lstm_layer(embed_output.unsqueeze(1))
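# lstm_output has shape (L, 1, hidden_dim): one hidden-state vector per word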
lstm_output.shape
###Output
_____no_output_____
###Markdown
Classifying the LSTM outputWe can now add a fully connected, [linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html#torch.nn.Linear) layer to our NN to learn the correct part of speech (classification). **Creating a linear layer**. We create a linear layer by specifying the size of its input (here, the LSTM's hidden dimension) and the number of neurons (here, one per tag). **Linear layer input and output**. The layer expects input whose last dimension matches the input size and returns the output of each neuron, so every word's hidden vector is mapped to one score per tag.
###Code
tag_size = len(tag_to_index)
linear_layer = torch.nn.Linear(hidden_dim, tag_size)
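# Applied to the last dimension: each hidden vector maps to one logit per tag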
linear_output = linear_layer(lstm_output)
linear_output.shape, linear_output
###Output
_____no_output_____
###Markdown
Training an LSTM model
###Code
# Hyperparameters
valid_percent = 0.2 # Training/validation split
embed_dim = 7 # Size of word embedding
hidden_dim = 8 # Size of LSTM internal state
num_layers = 5 # Number of LSTM layers
learning_rate = 0.1
num_epochs = 500
###Output
_____no_output_____
###Markdown
Creating training and validation datasets
###Code
N = len(dataset)
vocab_size = len(word_to_index) # Number of unique input words
tag_size = len(tag_to_index) # Number of unique output targets
# Shuffle the data so that we can split the dataset randomly
shuffle(dataset)
split_point = int(N * valid_percent)
valid_dataset = dataset[:split_point]
train_dataset = dataset[split_point:]
len(valid_dataset), len(train_dataset)
###Output
_____no_output_____
###Markdown
Creating the Parts of Speech LSTM model
###Code
class POS_LSTM(torch.nn.Module):
"""Part of Speach LSTM model."""
def __init__(self, vocab_size, embed_dim, hidden_dim, num_layers, tag_size):
super().__init__()
self.embed = torch.nn.Embedding(vocab_size, embed_dim)
self.lstm = torch.nn.LSTM(embed_dim, hidden_dim, num_layers=num_layers)
self.linear = torch.nn.Linear(hidden_dim, tag_size)
def forward(self, X):
X = self.embed(X)
X, _ = self.lstm(X.unsqueeze(1))
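        # X: (L, 1, hidden_dim); map each word's hidden state to tag logits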
return self.linear(X)
###Output
_____no_output_____
###Markdown
Training
###Code
def compute_accuracy(dataset):
"""A helper function for computing accuracy on the given dataset."""
total_words = 0
total_correct = 0
model.eval()
with torch.no_grad():
for sentence, tags in dataset:
sentence_indices = convert_to_index_tensor(sentence, word_to_index)
tag_scores = model(sentence_indices).squeeze()
predictions = tag_scores.argmax(dim=1)
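            # Compare predicted tag strings with the ground-truth tags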
total_words += len(sentence)
total_correct += sum(t == tag_list[p] for t, p in zip(tags, predictions))
return total_correct / total_words
model = POS_LSTM(vocab_size, embed_dim, hidden_dim, num_layers, tag_size)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
mb = master_bar(range(num_epochs))
accuracy = compute_accuracy(valid_dataset)
print(f"Validation accuracy before training : {accuracy * 100:.2f}%")
for epoch in mb:
# Shuffle the data for each epoch (stochastic gradient descent)
shuffle(train_dataset)
model.train()
for sentence, tags in progress_bar(train_dataset, parent=mb):
model.zero_grad()
sentence = convert_to_index_tensor(sentence, word_to_index)
tags = convert_to_index_tensor(tags, tag_to_index)
tag_scores = model(sentence)
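        # CrossEntropyLoss expects (L, tag_size) logits and (L,) target indices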
loss = criterion(tag_scores.squeeze(), tags)
loss.backward()
optimizer.step()
accuracy = compute_accuracy(valid_dataset)
print(f"Validation accuracy after training : {accuracy * 100:.2f}%")
###Output
Validation accuracy before training : 23.44%
###Markdown
Examining resultsHere we look at all words that are misclassified by the model.
###Code
print("\nMis-predictions after training on entire dataset")
header = "Word".center(14) + " | True Tag | Prediction"
print(header)
print("-" * len(header))
with torch.no_grad():
for sentence, tags in dataset:
sentence_indices = convert_to_index_tensor(sentence, word_to_index)
tag_scores = model(sentence_indices)
predictions = tag_scores.squeeze().argmax(dim=1)
for word, tag, pred in zip(sentence, tags, predictions):
if tag != tag_list[pred]:
print(f"{word:>14} | {tag} | {tag_list[pred]}")
###Output
Mis-predictions after training on entire dataset
Word | True Tag | Prediction
--------------------------------------
friends | N | D
not | D | N
the | D | N
tree | N | V
had | V | D
a | D | N
electricity | N | D
the | D | N
numbers | N | V
are | V | D
the | D | N
of | D | N
the | D | N
the | D | N
dog | N | V
ate | V | D
the | D | N
the | D | N
bus | N | V
spins | V | D
this | D | N
this | D | N
this | D | N
extension | D | V
cord | N | D
helps | V | N
the | D | N
college | N | V
is | V | D
having | V | N
the | D | N
this | D | N
food | N | D
dog | N | D
mammals | N | D
a | D | N
shark | N | V
ate | V | D
best | D | N
much | D | N
machines | N | D
won't | D | V
work | V | D
the | D | N
class | N | V
lasted | V | D
a | D | N
these | D | N
trees | N | D
outside | N | D
interests | N | V
span | V | D
many | D | N
the | D | N
cat | N | V
sleeps | V | D
the | D | N
a | N | V
hurt | V | D
lamps | N | D
a | D | N
cup | N | V
is | V | D
filled | V | N
finishing | V | D
his | D | N
turtles | N | D
tools | N | D
the | D | N
whale | N | V
swims | V | D
never | D | V
bike | V | D
as | D | N
good | D | N
as | D | N
the | D | N
dog | N | V
barked | V | D
sleeping | V | D
be | V | D
doing | V | N
wonderland | D | N
protocol | N | V
is | V | D
amazing | D | N
the | D | N
cat | N | V
fought | V | D
the | D | N
the | D | N
snake | N | V
ate | V | D
a | D | N
the | D | N
syrup | N | V
covers | V | D
the | D | N
right | D | N
now | D | N
my | D | N
the | D | N
cat | N | V
is | V | D
the | D | N
there | D | N
mirrors | N | D
also | D | V
sounds | V | D
good | D | N
valuable | D | N
###Markdown
Using the model for inference
###Code
new_sentence = "I is a teeth"
# Convert sentence to lowercase words
sentence = new_sentence.lower().split()
# Check that each word is in our vocabulary
for word in sentence:
assert word in word_to_index
# Convert input to a tensor
sentence = convert_to_index_tensor(sentence, word_to_index)
# Compute prediction
predictions = model(sentence)
predictions = predictions.squeeze().argmax(dim=1)
# Print results
for word, tag in zip(new_sentence.split(), predictions):
print(word, "=>", tag_list[tag.item()])
###Output
I => N
is => V
a => D
teeth => N
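###Markdown
A note on out-of-vocabulary words: the assertion above fails for any word the model never saw during training. A common workaround is to reserve a dedicated unknown-word index. The sketch below is illustrative only; it assumes the embedding layer is rebuilt with `vocab_size + 1` rows and the model retrained so the extra index carries meaning, and it is not part of the model trained above.
###Code
# Hypothetical OOV handling (assumption: model rebuilt and retrained with one
# extra embedding row reserved for the unknown-word index).
UNK_INDEX = len(word_to_index)
def convert_with_unk(words, mapping, unk_index=UNK_INDEX):
    # Fall back to unk_index for words missing from the vocabulary
    return torch.tensor([mapping.get(w, unk_index) for w in words],
                        dtype=torch.long)
###Output
_____no_output_____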